From 32091a625a0e374c7e88000d2d09bd6bd6880bd1 Mon Sep 17 00:00:00 2001 From: fuhui Date: Sun, 14 Jan 2024 10:53:24 +0800 Subject: [PATCH 01/98] add redis init py --- modelcache/manager/vector_data/redis.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 modelcache/manager/vector_data/redis.py diff --git a/modelcache/manager/vector_data/redis.py b/modelcache/manager/vector_data/redis.py new file mode 100644 index 0000000..5dc4e0d --- /dev/null +++ b/modelcache/manager/vector_data/redis.py @@ -0,0 +1,12 @@ +# -*- coding: utf-8 -*- +""" + Alipay.com Inc. + Copyright (c) 2004-2023 All Rights Reserved. + ------------------------------------------------------ + File Name : redis.py + Author : fuhui.phe + Create Time : 2024/1/14 10:53 + Description : description what the main function of this file + Change Activity: + version0 : 2024/1/14 10:53 by fuhui.phe init +""" From b4437f4fd23bd41bf852a14e48baa046e70eb58d Mon Sep 17 00:00:00 2001 From: fuhui Date: Sun, 14 Jan 2024 10:53:55 +0800 Subject: [PATCH 02/98] remove editor info --- modelcache/manager/vector_data/redis.py | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/modelcache/manager/vector_data/redis.py b/modelcache/manager/vector_data/redis.py index 5dc4e0d..40a96af 100644 --- a/modelcache/manager/vector_data/redis.py +++ b/modelcache/manager/vector_data/redis.py @@ -1,12 +1 @@ # -*- coding: utf-8 -*- -""" - Alipay.com Inc. - Copyright (c) 2004-2023 All Rights Reserved. - ------------------------------------------------------ - File Name : redis.py - Author : fuhui.phe - Create Time : 2024/1/14 10:53 - Description : description what the main function of this file - Change Activity: - version0 : 2024/1/14 10:53 by fuhui.phe init -""" From a76d8d6e84554dcffbc6d59ab889038786946512 Mon Sep 17 00:00:00 2001 From: fuhui Date: Thu, 25 Jan 2024 20:59:11 +0800 Subject: [PATCH 03/98] add redis class for vector --- modelcache/manager/vector_data/redis.py | 140 ++++++++++++++++++++++++ 1 file changed, 140 insertions(+) diff --git a/modelcache/manager/vector_data/redis.py b/modelcache/manager/vector_data/redis.py index 40a96af..20615bb 100644 --- a/modelcache/manager/vector_data/redis.py +++ b/modelcache/manager/vector_data/redis.py @@ -1 +1,141 @@ # -*- coding: utf-8 -*- +from typing import List + +import numpy as np +from typing import List + +import numpy as np +from modelcache.manager.vector_data.base import VectorBase, VectorData +# from modelcache.utils import import_redis +# from modelcache.utils.log import gptcache_log + +# import_redis() +# +# # pylint: disable=C0413 +# from redis.commands.search.indexDefinition import IndexDefinition, IndexType +# from redis.commands.search.query import Query +# from redis.commands.search.field import TagField, VectorField +# from redis.client import Redis + + +class RedisVectorStore(VectorBase): + """ vector store: Redis + + :param host: redis host, defaults to "localhost". + :type host: str + :param port: redis port, defaults to "6379". + :type port: str + :param username: redis username, defaults to "". + :type username: str + :param password: redis password, defaults to "". + :type password: str + :param dimension: the dimension of the vector, defaults to 0. + :type dimension: int + :param collection_name: the name of the index for Redis, defaults to "gptcache". + :type collection_name: str + :param top_k: the number of the vectors results to return, defaults to 1. + :type top_k: int + + Example: + .. 
code-block:: python + + from gptcache.manager import VectorBase + + vector_base = VectorBase("redis", dimension=10) + """ + def __init__( + self, + host: str = "localhost", + port: str = "6379", + username: str = "", + password: str = "", + dimension: int = 0, + collection_name: str = "gptcache", + top_k: int = 1, + namespace: str = "", + ): + self._client = Redis( + host=host, port=int(port), username=username, password=password + ) + self.top_k = top_k + self.dimension = dimension + self.collection_name = collection_name + self.namespace = namespace + self.doc_prefix = f"{self.namespace}doc:" # Prefix with the specified namespace + self._create_collection(collection_name) + + def _check_index_exists(self, index_name: str) -> bool: + """Check if Redis index exists.""" + try: + self._client.ft(index_name).info() + except: # pylint: disable=W0702 + gptcache_log.info("Index does not exist") + return False + gptcache_log.info("Index already exists") + return True + + def _create_collection(self, collection_name): + if self._check_index_exists(collection_name): + gptcache_log.info( + "The %s already exists, and it will be used directly", collection_name + ) + else: + schema = ( + TagField("tag"), # Tag Field Name + VectorField( + "vector", # Vector Field Name + "FLAT", + { # Vector Index Type: FLAT or HNSW + "TYPE": "FLOAT32", # FLOAT32 or FLOAT64 + "DIM": self.dimension, # Number of Vector Dimensions + "DISTANCE_METRIC": "COSINE", # Vector Search Distance Metric + }, + ), + ) + definition = IndexDefinition( + prefix=[self.doc_prefix], index_type=IndexType.HASH + ) + + # create Index + self._client.ft(collection_name).create_index( + fields=schema, definition=definition + ) + + def mul_add(self, datas: List[VectorData]): + pipe = self._client.pipeline() + + for data in datas: + key: int = data.id + obj = { + "vector": data.data.astype(np.float32).tobytes(), + } + pipe.hset(f"{self.doc_prefix}{key}", mapping=obj) + + pipe.execute() + + def search(self, data: np.ndarray, top_k: int = -1): + query = ( + Query( + f"*=>[KNN {top_k if top_k > 0 else self.top_k} @vector $vec as score]" + ) + .sort_by("score") + .return_fields("id", "score") + .paging(0, top_k if top_k > 0 else self.top_k) + .dialect(2) + ) + query_params = {"vec": data.astype(np.float32).tobytes()} + results = ( + self._client.ft(self.collection_name) + .search(query, query_params=query_params) + .docs + ) + return [(float(result.score), int(result.id[len(self.doc_prefix):])) for result in results] + + def rebuild(self, ids=None) -> bool: + pass + + def delete(self, ids) -> None: + pipe = self._client.pipeline() + for data_id in ids: + pipe.delete(f"{self.doc_prefix}{data_id}") + pipe.execute() \ No newline at end of file From 9fc78176c59e248ccbafa752f29969c0b3120f25 Mon Sep 17 00:00:00 2001 From: fuhui Date: Sun, 28 Jan 2024 16:00:23 +0800 Subject: [PATCH 04/98] add import redis func --- modelcache/manager/vector_data/redis.py | 5 ++--- modelcache/utils/__init__.py | 5 +++++ 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/modelcache/manager/vector_data/redis.py b/modelcache/manager/vector_data/redis.py index 20615bb..41871fc 100644 --- a/modelcache/manager/vector_data/redis.py +++ b/modelcache/manager/vector_data/redis.py @@ -6,12 +6,11 @@ import numpy as np from modelcache.manager.vector_data.base import VectorBase, VectorData -# from modelcache.utils import import_redis +from modelcache.utils import import_redis # from modelcache.utils.log import gptcache_log -# import_redis() +import_redis() # -# # pylint: 
disable=C0413 # from redis.commands.search.indexDefinition import IndexDefinition, IndexType # from redis.commands.search.query import Query # from redis.commands.search.field import TagField, VectorField diff --git a/modelcache/utils/__init__.py b/modelcache/utils/__init__.py index 425b926..47c0c4b 100644 --- a/modelcache/utils/__init__.py +++ b/modelcache/utils/__init__.py @@ -69,3 +69,8 @@ def import_timm(): def import_pillow(): _check_library("PIL", package="pillow") + + +def import_redis(): + _check_library("redis") + _check_library("redis_om") From 38f1b8d7eaa7707b0555a351e21023fdd7d50a3b Mon Sep 17 00:00:00 2001 From: fuhui Date: Sun, 4 Feb 2024 17:29:19 +0800 Subject: [PATCH 05/98] add redis sdk --- modelcache/manager/vector_data/redis.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/modelcache/manager/vector_data/redis.py b/modelcache/manager/vector_data/redis.py index 41871fc..d6bcef9 100644 --- a/modelcache/manager/vector_data/redis.py +++ b/modelcache/manager/vector_data/redis.py @@ -7,6 +7,9 @@ import numpy as np from modelcache.manager.vector_data.base import VectorBase, VectorData from modelcache.utils import import_redis +from redis.commands.search.query import Query +from redis.commands.search.indexDefinition import IndexDefinition, IndexType + # from modelcache.utils.log import gptcache_log import_redis() From 848e76422495d41234edcf52b4d7c20c2818443b Mon Sep 17 00:00:00 2001 From: fuhui Date: Mon, 12 Feb 2024 15:55:22 +0800 Subject: [PATCH 06/98] modify redis func --- modelcache/manager/vector_data/redis.py | 30 +------------------------ 1 file changed, 1 insertion(+), 29 deletions(-) diff --git a/modelcache/manager/vector_data/redis.py b/modelcache/manager/vector_data/redis.py index d6bcef9..4531aa8 100644 --- a/modelcache/manager/vector_data/redis.py +++ b/modelcache/manager/vector_data/redis.py @@ -1,16 +1,12 @@ # -*- coding: utf-8 -*- from typing import List -import numpy as np -from typing import List - import numpy as np from modelcache.manager.vector_data.base import VectorBase, VectorData from modelcache.utils import import_redis from redis.commands.search.query import Query from redis.commands.search.indexDefinition import IndexDefinition, IndexType - -# from modelcache.utils.log import gptcache_log +from modelcache.utils.log import modelcache_log import_redis() # @@ -21,30 +17,6 @@ class RedisVectorStore(VectorBase): - """ vector store: Redis - - :param host: redis host, defaults to "localhost". - :type host: str - :param port: redis port, defaults to "6379". - :type port: str - :param username: redis username, defaults to "". - :type username: str - :param password: redis password, defaults to "". - :type password: str - :param dimension: the dimension of the vector, defaults to 0. - :type dimension: int - :param collection_name: the name of the index for Redis, defaults to "gptcache". - :type collection_name: str - :param top_k: the number of the vectors results to return, defaults to 1. - :type top_k: int - - Example: - .. 
code-block:: python - - from gptcache.manager import VectorBase - - vector_base = VectorBase("redis", dimension=10) - """ def __init__( self, host: str = "localhost", From 59def83937407374fa87572177c83ce1e4e34ed1 Mon Sep 17 00:00:00 2001 From: fuhui Date: Tue, 9 Apr 2024 11:42:02 +0800 Subject: [PATCH 07/98] add redis vector --- flask4modelcache.py | 5 +- modelcache/manager/vector_data/redis.py | 181 ++++++++++++++++++++++++ 2 files changed, 185 insertions(+), 1 deletion(-) create mode 100644 modelcache/manager/vector_data/redis.py diff --git a/flask4modelcache.py b/flask4modelcache.py index 18c5a58..cde579d 100644 --- a/flask4modelcache.py +++ b/flask4modelcache.py @@ -38,8 +38,11 @@ def response_hitquery(cache_resp): mysql_config.read('modelcache/config/mysql_config.ini') milvus_config = configparser.ConfigParser() milvus_config.read('modelcache/config/milvus_config.ini') +# data_manager = get_data_manager(CacheBase("mysql", config=mysql_config), +# VectorBase("milvus", dimension=data2vec.dimension, milvus_config=milvus_config)) + data_manager = get_data_manager(CacheBase("mysql", config=mysql_config), - VectorBase("milvus", dimension=data2vec.dimension, milvus_config=milvus_config)) + VectorBase("redis", dimension=data2vec.dimension, milvus_config=milvus_config)) cache.init( diff --git a/modelcache/manager/vector_data/redis.py b/modelcache/manager/vector_data/redis.py new file mode 100644 index 0000000..fdd2ae8 --- /dev/null +++ b/modelcache/manager/vector_data/redis.py @@ -0,0 +1,181 @@ +# -*- coding: utf-8 -*- +from typing import List +import numpy as np +from redis.commands.search.indexDefinition import IndexDefinition, IndexType +from redis.commands.search.query import Query +from redis.commands.search.field import TagField, VectorField, NumericField +from redis.client import Redis + +from gptcache.manager.vector_data.base import VectorBase, VectorData +from gptcache.utils import import_redis +from gptcache.utils.log import gptcache_log +from gptcache.utils.collection_util import get_collection_name +from gptcache.utils.collection_util import get_collection_prefix +import_redis() + + +class RedisVectorStore(VectorBase): + def __init__( + self, + host: str = "localhost", + port: str = "6379", + username: str = "", + password: str = "", + table_suffix: str = "", + dimension: int = 0, + collection_prefix: str = "gptcache", + top_k: int = 1, + namespace: str = "", + ): + if dimension <= 0: + raise ValueError( + f"invalid `dim` param: {dimension} in the Milvus vector store." 
+ ) + self._client = Redis( + host=host, port=int(port), username=username, password=password + ) + self.top_k = top_k + self.dimension = dimension + self.collection_prefix = collection_prefix + self.table_suffix = table_suffix + self.namespace = namespace + self.doc_prefix = f"{self.namespace}doc:" # Prefix with the specified namespace + # self._create_collection(collection_name) + + def _check_index_exists(self, index_name: str) -> bool: + """Check if Redis index exists.""" + try: + self._client.ft(index_name).info() + except: # pylint: disable=W0702 + gptcache_log.info("Index does not exist") + return False + gptcache_log.info("Index already exists") + return True + + def create_collection(self, collection_name, index_prefix): + dimension = self.dimension + print('dimension: {}'.format(dimension)) + if self._check_index_exists(collection_name): + gptcache_log.info( + "The %s already exists, and it will be used directly", collection_name + ) + return 'already_exists' + else: + # id_field_name = collection_name + '_' + "id" + # embedding_field_name = collection_name + '_' + "vec" + id_field_name = "data_id" + embedding_field_name = "data_vector" + + id = NumericField(name=id_field_name) + embedding = VectorField(embedding_field_name, + "HNSW", { + "TYPE": "FLOAT32", + "DIM": dimension, + "DISTANCE_METRIC": "L2", + "INITIAL_CAP": 1000, + } + ) + fields = [id, embedding] + # definition = IndexDefinition(index_type=IndexType.HASH) + definition = IndexDefinition(prefix=[index_prefix], index_type=IndexType.HASH) + + # create Index + self._client.ft(collection_name).create_index( + fields=fields, definition=definition + ) + return 'create_success' + + def mul_add(self, datas: List[VectorData], model=None): + # pipe = self._client.pipeline() + for data in datas: + id: int = data.id + embedding = data.data.astype(np.float32).tobytes() + # id_field_name = collection_name + '_' + "id" + # embedding_field_name = collection_name + '_' + "vec" + id_field_name = "data_id" + embedding_field_name = "data_vector" + obj = {id_field_name: id, embedding_field_name: embedding} + index_prefix = get_collection_prefix(model, self.table_suffix) + self._client.hset(f"{index_prefix}{id}", mapping=obj) + + # obj = { + # "vector": data.data.astype(np.float32).tobytes(), + # } + # pipe.hset(f"{self.doc_prefix}{key}", mapping=obj) + # pipe.execute() + + def search(self, data: np.ndarray, top_k: int = -1, model=None): + collection_name = get_collection_name(model, self.table_suffix) + print('collection_name: {}'.format(collection_name)) + id_field_name = "data_id" + embedding_field_name = "data_vector" + + base_query = f'*=>[KNN 2 @{embedding_field_name} $vector AS distance]' + query = ( + Query(base_query) + .sort_by("distance") + .return_fields(id_field_name, "distance") + .dialect(2) + ) + + query_params = {"vector": data.astype(np.float32).tobytes()} + # print('query_params: {}'.format(query_params)) + results = ( + self._client.ft(collection_name) + .search(query, query_params=query_params) + .docs + ) + print('results: {}'.format(results)) + for i, doc in enumerate(results): + print('doc: {}'.format(doc)) + print("id_field_name", getattr(doc, id_field_name), ", distance: ", doc.distance) + return [(float(result.distance), int(getattr(result, id_field_name))) for result in results] + + def rebuild(self, ids=None) -> bool: + pass + + def rebuild_col(self, model): + resp_info = 'failed' + if len(self.table_suffix) == 0: + raise ValueError('table_suffix is none error,please check!') + + collection_name_model = 
get_collection_name(model, self.table_suffix) + print('collection_name_model: {}'.format(collection_name_model)) + if self._check_index_exists(collection_name_model): + try: + self._client.ft(collection_name_model).dropindex(delete_documents=True) + except Exception as e: + raise ValueError(str(e)) + try: + index_prefix = get_collection_prefix(model, self.table_suffix) + self.create_collection(collection_name_model, index_prefix) + except Exception as e: + raise ValueError(str(e)) + return 'rebuild success' + + # print('remove collection_name_model: {}'.format(collection_name_model)) + # try: + # self._client.ft(collection_name_model).dropindex(delete_documents=True) + # resp_info = 'rebuild success' + # except Exception as e: + # print('exception: {}'.format(e)) + # resp_info = 'create only' + # try: + # self.create_collection(collection_name_model) + # except Exception as e: + # raise ValueError(str(e)) + # return resp_info + + def delete(self, ids) -> None: + pipe = self._client.pipeline() + for data_id in ids: + pipe.delete(f"{self.doc_prefix}{data_id}") + pipe.execute() + + def create(self, model=None): + collection_name = get_collection_name(model, self.table_suffix) + index_prefix = get_collection_prefix(model, self.table_suffix) + return self.create_collection(collection_name, index_prefix) + + def get_collection_by_name(self, collection_name, table_suffix): + pass From 560c86275c41e5c26ac71aedf037ffe815575323 Mon Sep 17 00:00:00 2001 From: fuhui Date: Tue, 9 Apr 2024 14:31:54 +0800 Subject: [PATCH 08/98] add redis vector store --- examples/flask/register.py | 21 ++++++ flask4modelcache.py | 29 +++++-- modelcache/adapter/adapter.py | 11 +++ modelcache/adapter/adapter_register.py | 13 ++++ modelcache/manager/data_manager.py | 3 + modelcache/manager/vector_data/manager.py | 22 ++++++ modelcache/manager/vector_data/redis.py | 92 +++++++---------------- modelcache/utils/__init__.py | 4 + modelcache/utils/index_util.py | 9 +++ requirements.txt | 2 + 10 files changed, 133 insertions(+), 73 deletions(-) create mode 100644 examples/flask/register.py create mode 100644 modelcache/adapter/adapter_register.py create mode 100644 modelcache/utils/index_util.py diff --git a/examples/flask/register.py b/examples/flask/register.py new file mode 100644 index 0000000..737b495 --- /dev/null +++ b/examples/flask/register.py @@ -0,0 +1,21 @@ +# -*- coding: utf-8 -*- +""" +register index for redis +""" +import json +import requests + + +def run(): + url = 'http://127.0.0.1:5000/modelcache' + type = 'register' + scope = {"model": "CODEGPT-1117"} + data = {'type': type, 'scope': scope} + headers = {"Content-Type": "application/json"} + res = requests.post(url, headers=headers, json=json.dumps(data)) + res_text = res.text + print('res_text: {}'.format(res_text)) + + +if __name__ == '__main__': + run() \ No newline at end of file diff --git a/flask4modelcache.py b/flask4modelcache.py index cde579d..8a3efa2 100644 --- a/flask4modelcache.py +++ b/flask4modelcache.py @@ -1,6 +1,5 @@ # -*- coding: utf-8 -*- import time -from datetime import datetime from flask import Flask, request import logging import configparser @@ -15,7 +14,6 @@ from modelcache.utils.model_filter import model_blacklist_filter from modelcache.embedding import Data2VecAudio - # 创建一个Flask实例 app = Flask(__name__) @@ -36,13 +34,19 @@ def response_hitquery(cache_resp): data2vec = Data2VecAudio() mysql_config = configparser.ConfigParser() mysql_config.read('modelcache/config/mysql_config.ini') + milvus_config = configparser.ConfigParser() 
milvus_config.read('modelcache/config/milvus_config.ini') -# data_manager = get_data_manager(CacheBase("mysql", config=mysql_config), -# VectorBase("milvus", dimension=data2vec.dimension, milvus_config=milvus_config)) + +# redis_config = configparser.ConfigParser() +# redis_config.read('modelcache/config/redis_config.ini') + data_manager = get_data_manager(CacheBase("mysql", config=mysql_config), - VectorBase("redis", dimension=data2vec.dimension, milvus_config=milvus_config)) + VectorBase("milvus", dimension=data2vec.dimension, milvus_config=milvus_config)) + +# data_manager = get_data_manager(CacheBase("mysql", config=mysql_config), +# VectorBase("redis", dimension=data2vec.dimension, redis_config=redis_config)) cache.init( @@ -88,9 +92,9 @@ def user_backend(): model = model.replace('.', '_') query = param_dict.get("query") chat_info = param_dict.get("chat_info") - if request_type is None or request_type not in ['query', 'insert', 'detox', 'remove']: + if request_type is None or request_type not in ['query', 'insert', 'remove', 'register']: result = {"errorCode": 102, - "errorDesc": "type exception, should one of ['query', 'insert', 'detox', 'remove']", + "errorDesc": "type exception, should one of ['query', 'insert', 'remove', 'register']", "cacheHit": False, "delta_time": 0, "hit_query": '', "answer": ''} cache.data_manager.save_query_resp(result, model=model, query='', delta_time=0) return json.dumps(result) @@ -173,6 +177,17 @@ def user_backend(): result = {"errorCode": 402, "errorDesc": "", "response": response, "writeStatus": "exception"} return json.dumps(result) + if request_type == 'register': + # iat_type = param_dict.get("iat_type") + response = adapter.ChatCompletion.create_register( + model=model + ) + if response in ['create_success', 'already_exists']: + result = {"errorCode": 0, "errorDesc": "", "response": response, "writeStatus": "success"} + else: + result = {"errorCode": 502, "errorDesc": "", "response": response, "writeStatus": "exception"} + return json.dumps(result) + if __name__ == '__main__': app.run(host='0.0.0.0', port=5000, debug=True) diff --git a/modelcache/adapter/adapter.py b/modelcache/adapter/adapter.py index f5e38eb..1428da2 100644 --- a/modelcache/adapter/adapter.py +++ b/modelcache/adapter/adapter.py @@ -5,6 +5,7 @@ from modelcache.adapter.adapter_query import adapt_query from modelcache.adapter.adapter_insert import adapt_insert from modelcache.adapter.adapter_remove import adapt_remove +from modelcache.adapter.adapter_register import adapt_register class ChatCompletion(openai.ChatCompletion): @@ -44,6 +45,16 @@ def create_remove(cls, *args, **kwargs): logging.info('adapt_remove_e: {}'.format(e)) return str(e) + @classmethod + def create_register(cls, *args, **kwargs): + try: + return adapt_register( + *args, + **kwargs + ) + except Exception as e: + return str(e) + def construct_resp_from_cache(return_message, return_query): return { diff --git a/modelcache/adapter/adapter_register.py b/modelcache/adapter/adapter_register.py new file mode 100644 index 0000000..53df128 --- /dev/null +++ b/modelcache/adapter/adapter_register.py @@ -0,0 +1,13 @@ +# -*- coding: utf-8 -*- +from modelcache import cache + + +def adapt_register(*args, **kwargs): + chat_cache = kwargs.pop("cache_obj", cache) + model = kwargs.pop("model", None) + if model is None or len(model) == 0: + return ValueError('') + + register_resp = chat_cache.data_manager.create_index(model) + print('register_resp: {}'.format(register_resp)) + return register_resp diff --git 
a/modelcache/manager/data_manager.py b/modelcache/manager/data_manager.py index db8b776..a83e638 100644 --- a/modelcache/manager/data_manager.py +++ b/modelcache/manager/data_manager.py @@ -256,6 +256,9 @@ def delete(self, id_list, **kwargs): return {'status': 'success', 'milvus': 'delete_count: '+str(v_delete_count), 'mysql': 'delete_count: '+str(s_delete_count)} + def create_index(self, model, **kwargs): + return self.v.create(model) + def truncate(self, model_name): # model = kwargs.pop("model", None) # drop milvus data diff --git a/modelcache/manager/vector_data/manager.py b/modelcache/manager/vector_data/manager.py index 54f7c55..70448b2 100644 --- a/modelcache/manager/vector_data/manager.py +++ b/modelcache/manager/vector_data/manager.py @@ -68,6 +68,28 @@ def get(name, **kwargs): local_mode=local_mode, local_data=local_data ) + elif name == "redis": + from modelcache.manager.vector_data.redis import RedisVectorStore + dimension = kwargs.get("dimension", DIMENSION) + VectorBase.check_dimension(dimension) + + redis_config = kwargs.get("redis_config") + host = redis_config.get('redis', 'host') + port = redis_config.get('redis', 'port') + user = redis_config.get('redis', 'user') + password = redis_config.get('redis', 'password') + namespace = kwargs.get("namespace", "") + # collection_name = kwargs.get("collection_name", COLLECTION_NAME) + + vector_base = RedisVectorStore( + host=host, + port=port, + username=user, + password=password, + namespace=namespace, + top_k=top_k, + dimension=dimension, + ) elif name == "faiss": from modelcache.manager.vector_data.faiss import Faiss diff --git a/modelcache/manager/vector_data/redis.py b/modelcache/manager/vector_data/redis.py index fdd2ae8..e8fd3f4 100644 --- a/modelcache/manager/vector_data/redis.py +++ b/modelcache/manager/vector_data/redis.py @@ -6,11 +6,11 @@ from redis.commands.search.field import TagField, VectorField, NumericField from redis.client import Redis -from gptcache.manager.vector_data.base import VectorBase, VectorData -from gptcache.utils import import_redis -from gptcache.utils.log import gptcache_log -from gptcache.utils.collection_util import get_collection_name -from gptcache.utils.collection_util import get_collection_prefix +from modelcache.manager.vector_data.base import VectorBase, VectorData +from modelcache.utils import import_redis +from modelcache.utils.log import modelcache_log +from modelcache.utils.index_util import get_index_name +from modelcache.utils.index_util import get_index_prefix import_redis() @@ -21,9 +21,7 @@ def __init__( port: str = "6379", username: str = "", password: str = "", - table_suffix: str = "", dimension: int = 0, - collection_prefix: str = "gptcache", top_k: int = 1, namespace: str = "", ): @@ -36,33 +34,28 @@ def __init__( ) self.top_k = top_k self.dimension = dimension - self.collection_prefix = collection_prefix - self.table_suffix = table_suffix self.namespace = namespace - self.doc_prefix = f"{self.namespace}doc:" # Prefix with the specified namespace - # self._create_collection(collection_name) + self.doc_prefix = f"{self.namespace}doc:" def _check_index_exists(self, index_name: str) -> bool: """Check if Redis index exists.""" try: self._client.ft(index_name).info() - except: # pylint: disable=W0702 - gptcache_log.info("Index does not exist") + except: + modelcache_log.info("Index does not exist") return False - gptcache_log.info("Index already exists") + modelcache_log.info("Index already exists") return True - def create_collection(self, collection_name, index_prefix): + def 
create_index(self, index_name, index_prefix): dimension = self.dimension print('dimension: {}'.format(dimension)) - if self._check_index_exists(collection_name): - gptcache_log.info( - "The %s already exists, and it will be used directly", collection_name + if self._check_index_exists(index_name): + modelcache_log.info( + "The %s already exists, and it will be used directly", index_name ) return 'already_exists' else: - # id_field_name = collection_name + '_' + "id" - # embedding_field_name = collection_name + '_' + "vec" id_field_name = "data_id" embedding_field_name = "data_vector" @@ -76,11 +69,10 @@ def create_collection(self, collection_name, index_prefix): } ) fields = [id, embedding] - # definition = IndexDefinition(index_type=IndexType.HASH) definition = IndexDefinition(prefix=[index_prefix], index_type=IndexType.HASH) # create Index - self._client.ft(collection_name).create_index( + self._client.ft(index_name).create_index( fields=fields, definition=definition ) return 'create_success' @@ -90,23 +82,14 @@ def mul_add(self, datas: List[VectorData], model=None): for data in datas: id: int = data.id embedding = data.data.astype(np.float32).tobytes() - # id_field_name = collection_name + '_' + "id" - # embedding_field_name = collection_name + '_' + "vec" id_field_name = "data_id" embedding_field_name = "data_vector" obj = {id_field_name: id, embedding_field_name: embedding} - index_prefix = get_collection_prefix(model, self.table_suffix) + index_prefix = get_index_prefix(model) self._client.hset(f"{index_prefix}{id}", mapping=obj) - # obj = { - # "vector": data.data.astype(np.float32).tobytes(), - # } - # pipe.hset(f"{self.doc_prefix}{key}", mapping=obj) - # pipe.execute() - def search(self, data: np.ndarray, top_k: int = -1, model=None): - collection_name = get_collection_name(model, self.table_suffix) - print('collection_name: {}'.format(collection_name)) + index_name = get_index_name(model) id_field_name = "data_id" embedding_field_name = "data_vector" @@ -119,53 +102,30 @@ def search(self, data: np.ndarray, top_k: int = -1, model=None): ) query_params = {"vector": data.astype(np.float32).tobytes()} - # print('query_params: {}'.format(query_params)) results = ( - self._client.ft(collection_name) + self._client.ft(index_name) .search(query, query_params=query_params) .docs ) - print('results: {}'.format(results)) - for i, doc in enumerate(results): - print('doc: {}'.format(doc)) - print("id_field_name", getattr(doc, id_field_name), ", distance: ", doc.distance) return [(float(result.distance), int(getattr(result, id_field_name))) for result in results] def rebuild(self, ids=None) -> bool: pass def rebuild_col(self, model): - resp_info = 'failed' - if len(self.table_suffix) == 0: - raise ValueError('table_suffix is none error,please check!') - - collection_name_model = get_collection_name(model, self.table_suffix) - print('collection_name_model: {}'.format(collection_name_model)) - if self._check_index_exists(collection_name_model): + index_name_model = get_index_name(model) + if self._check_index_exists(index_name_model): try: - self._client.ft(collection_name_model).dropindex(delete_documents=True) + self._client.ft(index_name_model).dropindex(delete_documents=True) except Exception as e: raise ValueError(str(e)) try: - index_prefix = get_collection_prefix(model, self.table_suffix) - self.create_collection(collection_name_model, index_prefix) + index_prefix = get_index_prefix(model) + self.create_index(index_name_model, index_prefix) except Exception as e: raise ValueError(str(e)) 
return 'rebuild success' - # print('remove collection_name_model: {}'.format(collection_name_model)) - # try: - # self._client.ft(collection_name_model).dropindex(delete_documents=True) - # resp_info = 'rebuild success' - # except Exception as e: - # print('exception: {}'.format(e)) - # resp_info = 'create only' - # try: - # self.create_collection(collection_name_model) - # except Exception as e: - # raise ValueError(str(e)) - # return resp_info - def delete(self, ids) -> None: pipe = self._client.pipeline() for data_id in ids: @@ -173,9 +133,9 @@ def delete(self, ids) -> None: pipe.execute() def create(self, model=None): - collection_name = get_collection_name(model, self.table_suffix) - index_prefix = get_collection_prefix(model, self.table_suffix) - return self.create_collection(collection_name, index_prefix) + index_name = get_index_name(model) + index_prefix = get_index_prefix(model) + return self.create_index(index_name, index_prefix) - def get_collection_by_name(self, collection_name, table_suffix): + def get_index_by_name(self, index_name): pass diff --git a/modelcache/utils/__init__.py b/modelcache/utils/__init__.py index 425b926..147a56e 100644 --- a/modelcache/utils/__init__.py +++ b/modelcache/utils/__init__.py @@ -69,3 +69,7 @@ def import_timm(): def import_pillow(): _check_library("PIL", package="pillow") + + +def import_redis(): + _check_library("redis") diff --git a/modelcache/utils/index_util.py b/modelcache/utils/index_util.py new file mode 100644 index 0000000..be6e856 --- /dev/null +++ b/modelcache/utils/index_util.py @@ -0,0 +1,9 @@ +# -*- coding: utf-8 -*- + + +def get_index_name(model): + return 'modelcache' + '_' + model + + +def get_index_prefix(model): + return 'prefix' + '_' + model diff --git a/requirements.txt b/requirements.txt index e622636..3bf85e6 100644 --- a/requirements.txt +++ b/requirements.txt @@ -10,3 +10,5 @@ Requests==2.31.0 torch==2.1.0 transformers==4.34.1 faiss-cpu==1.7.4 +redis==5.0.1 + From 4636fb2e3f17a60d99081e8408a8dfe593c0a9d5 Mon Sep 17 00:00:00 2001 From: fuhui Date: Tue, 9 Apr 2024 14:46:20 +0800 Subject: [PATCH 09/98] Update the modules and adjust the format --- .gitignore | 11 ----------- README.md | 2 +- README_CN.md | 2 +- docs/modelcache_modules_20240409.png | Bin 0 -> 506154 bytes flask4modelcache_demo.py | 3 ++- model/text2vec-base-chinese/logs.txt | 20 +------------------- reference_doc/create_table.sql | 1 + 7 files changed, 6 insertions(+), 33 deletions(-) create mode 100644 docs/modelcache_modules_20240409.png diff --git a/.gitignore b/.gitignore index 7d4090f..bb9ceec 100644 --- a/.gitignore +++ b/.gitignore @@ -27,9 +27,6 @@ share/python-wheels/ *.egg MANIFEST *.DS_Store -# PyInstaller -# Usually these files are written by a python script from a template -# before PyInstaller builds the exe, so as to inject date/other infos into it. *.manifest *.spec @@ -85,14 +82,6 @@ ipython_config.py # pyenv .python-version -# pipenv -# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. -# However, in case of collaboration, if having platform-specific dependencies or dependencies -# having no cross-platform support, pipenv may install dependencies that don't work, or not -# install all needed dependencies. -#Pipfile.lock - -# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow __pypackages__/ # Celery stuff diff --git a/README.md b/README.md index 71740bb..1db2475 100644 --- a/README.md +++ b/README.md @@ -99,7 +99,7 @@ res = requests.post(url, headers=headers, json=json.dumps(data)) ## Articles https://mp.weixin.qq.com/s/ExIRu2o7yvXa6nNLZcCfhQ ## modules -![modelcache modules](docs/modelcache_modules_20231114.png) +![modelcache modules](docs/modelcache_modules_20240409.png) ## Function-Comparison In terms of functionality, we have made several changes to the git repository. Firstly, we have addressed the network issues with huggingface and enhanced the inference speed by introducing local inference capabilities for embeddings. Additionally, considering the limitations of the SqlAlchemy framework, we have completely revamped the module responsible for interacting with relational databases, enabling more flexible database operations. In practical scenarios, LLM products often require integration with multiple users and multiple models. Hence, we have added support for multi-tenancy in the ModelCache, while also making preliminary compatibility adjustments for system commands and multi-turn dialogue. diff --git a/README_CN.md b/README_CN.md index 18d3ea5..f552270 100644 --- a/README_CN.md +++ b/README_CN.md @@ -100,7 +100,7 @@ res = requests.post(url, headers=headers, json=json.dumps(data)) ## 文章 https://mp.weixin.qq.com/s/ExIRu2o7yvXa6nNLZcCfhQ ## 架构大图 -![modelcache modules](docs/modelcache_modules_20231114.png) +![modelcache modules](docs/modelcache_modules_20240409.png) ## 功能对比 功能方面,为了解决huggingface网络问题并提升推理速度,增加了embedding本地推理能力。鉴于SqlAlchemy框架存在一些限制,我们对关系数据库交互模块进行了重写,以更灵活地实现数据库操作。在实践中,大型模型产品需要与多个用户和多个模型对接,因此在ModelCache中增加了对多租户的支持,同时也初步兼容了系统指令和多轮会话。 diff --git a/docs/modelcache_modules_20240409.png b/docs/modelcache_modules_20240409.png new file mode 100644 index 0000000000000000000000000000000000000000..e7c44426450e181de6f4b6223917128a347a375d GIT binary patch literal 506154 zcmcG0XFyZSy0(fG6+}cS(gYPYA_k;MOB5+mqoN=nWYarHZy^yur70@XOOz_oHWENk z5{mQ^njis9=uIFHAPJ;=%YE*>XWw)7x%Zs!eBZy!WM<8pd1q$LJkRsKaW~8icn+UB zyl>w=9>eRpxAyJhklMHJpa&NRkkeM{oxE?~gMEg&S8s>f{b+PgchO5(&!RCx*NWHn zrnYGJwJAGcW)R#4xb7{c4t3_X1%=P%lcn`8y0+tu*bR}Jx`RG9VlLd)z46-l*1_{V z`)%~uqc|S*9yH*%DQ(bMm;TD|mwDRLy^7eod~&1K5v~^fcjzo=qo&lTH6K=Nudf?U z5Yv=u*Lm{z*;V}Gyyx%RpAWzB?x}Li;374r=|wAXkeZV?hf^H~FYythK?xkL+{9=J z-Xi&qJZGsUPM82szfcn=MBK#YnO=|X176I9Q`w3ScoACZwW0~Bar-3TR?3$h_es!0 z6};62_rPr1@O^XQMusH~G^@gla*XfEkA3UdOHHwg$Vgf(I=8-^LN%a0lP3E;q3|F0XKNZjK zual5iIU`z-*XQ>Dv`PFJEI-4BtrTJhOISz!`{~lpASJ1nfuR9LQUq@YNA+;Uynu4F z7#u?IJ|ID#Uu=6&DQ|^8ib1WbM)4v7wj7$qQZIOO5^y0|O`KSuIMLHMwKjTv;iD-r znjqRJM5;MwJkd-cf0HIP`#!Vdrs7`~t`hfXKEn<)ztcL;!V5C9&tsr$v2)DAx>($X`R@_DFOGEKLD%Sk1#(-AU^;- z8*(@ZrV>3p2g8ZQ!hDBaVvpP~ch22y=Rs6GK=EpCYe(^dsdmlXADTe|B=VbLj0~xn zlOT6jSdH3t00G__q5P~L!7ew$fxX*=dy>1G`?HbCU!5n3f<4A55SN)kz1!}|dAHZ@ zzkj?1t;@P)qt_7y>eCR1+kY_=pc1o!%{@vNFOv$bI-)^k_^tyI@L?Y#?~`Jj80D2v z(xW??R>WvuylF{!-b_=Pix@BFHxHR3aFXdq*_u0nqST?XnPx$YRi`LkjJsN4ZTvGf zSY{C2f1@L_NaNJ90_d)3`txbq~e`O?qT z@P~(2s$hLAz8TDH=R9WE=X!in+m#wwI0z{Q5>1u$-y0&$NC9|>`9t)~mGX)Izkj&D zfxEgFm2s_a?S$(1?^wXDbTk!5*9rth8~|nq!A1;j)^IIcWO3slU{q|K1~ohI$(HKC z%pqblFTyv7q`q`?$@_H&KVVV%_hn+hkl4_~l_Z%5z?5kG(!5ZhSx^@x4!wHaoBjt6#46qfU zwp1UB0!+e*y;}xMnK2Fw>4Gj1unMs4$dOhCYob6}NBF3KJxK{|hk-faO+13rIfzJR zlb}0P{d~lPPq!r-pHzWUV{&&GSZ!5dIp}bdZ#B{Q=iY8<1IindJ&5Uh2x7-{M#H=t zW9^EMQB{v%PEe(I5jERV;W;|Qi3`+Q!~`wsm(hg9KdW4E6zixT(`R$e7ip66Y)vDK z#Q2orQw7sFMK8L05QC~4lHV~lh$fs!XrhoU8rl#`Zlo*+J}Tn7QQcRN7!Shy!T|!# 
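The patches above give each model its own RediSearch index: create_index declares a NumericField ("data_id") plus an HNSW VectorField ("data_vector") over hashes that share the model's key prefix, mul_add writes embeddings with HSET under that prefix, and search runs a KNN query sorted by distance. Below is a minimal standalone sketch of that pattern, assuming a RediSearch-enabled Redis on localhost:6379, redis-py 5.x, and a toy 4-dimensional embedding; the real store derives the dimension from data2vec.dimension and the names from get_index_name/get_index_prefix.

# Minimal sketch of the RediSearch pattern used by RedisVectorStore (PATCH 07/08).
# Assumptions: Redis Stack (RediSearch) at localhost:6379, redis-py >= 5.0,
# and the same field/prefix naming as the patched code.
import numpy as np
from redis import Redis
from redis.commands.search.field import NumericField, VectorField
from redis.commands.search.indexDefinition import IndexDefinition, IndexType
from redis.commands.search.query import Query

DIM = 4                              # toy dimension; the service uses data2vec.dimension
model = "CODEGPT-1117"
index_name = f"modelcache_{model}"   # get_index_name(model)
index_prefix = f"prefix_{model}"     # get_index_prefix(model)

client = Redis(host="localhost", port=6379)

# 1. Create the per-model index (mirrors RedisVectorStore.create_index).
fields = [
    NumericField(name="data_id"),
    VectorField("data_vector", "HNSW", {
        "TYPE": "FLOAT32", "DIM": DIM, "DISTANCE_METRIC": "L2", "INITIAL_CAP": 1000,
    }),
]
definition = IndexDefinition(prefix=[index_prefix], index_type=IndexType.HASH)
client.ft(index_name).create_index(fields=fields, definition=definition)

# 2. Store embeddings as hashes under the index prefix (mirrors mul_add).
for data_id, vec in [(1, np.random.rand(DIM)), (2, np.random.rand(DIM))]:
    client.hset(f"{index_prefix}{data_id}",
                mapping={"data_id": data_id,
                         "data_vector": vec.astype(np.float32).tobytes()})

# 3. KNN lookup sorted by distance (mirrors search).
query = (
    Query("*=>[KNN 2 @data_vector $vector AS distance]")
    .sort_by("distance")
    .return_fields("data_id", "distance")
    .dialect(2)
)
docs = client.ft(index_name).search(
    query,
    query_params={"vector": np.random.rand(DIM).astype(np.float32).tobytes()},
).docs
print([(float(doc.distance), int(doc.data_id)) for doc in docs])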
z@!H_Uvi*^@Uu*bX{X5gokB8jp5*e{3ao>GZ2OV@j zwK`mQhX){Dv8l_Da{C7EfE{j<#?k3DA_=)pYU34v(Jr%n+kQDe`dAVGMfmD>^!0`{ ztU9{9L^Prv-rN$19&^=?Oo;$teOe0t69>)Bo5QiPY{qF+oXg{Z0R=GtWc&{v^c-#_ zGQz!xc~$CAt`Pw;GBfD%qEbu7?BlZ4~d_Hs|kNeU?r5c2pbo+}1y<<{XR*9V;+~xr>k+OMpls z0i8(NePGuWvkX3_qgD{6o||aHBnSp7nH)@@e0!i65%)K43dCwHfE&d{y;PkmdfY9M z$MG5(pY4fNvi%|Sj&na>dVBC^9qzK1EV_^PaV38RhF~yrDsG-s7P-FG;Fu#h^5o!Q zXau&xnG)ys5tnOd!T`uh2wGNcHRy)Zt`VjA=a{`lg$r*BaK@l$cOSMI{`D7d!hR$= z|D|gEtpL$)bKkon2UMp=5?GCeMZW8%lyeC9%E5ouc>@w@JIJK(%f|+K zv+Gz%!M#Y_E=)lWl9*;L(oS+Vjaz1-!sGW7H=KQ4E~_(jI9~0<0JlAM5KtxqaenfK zcsT;jL@=iFNN3~Q@N`uK3K1JTIu&E({ZRv zb28aE-Wn%fht)9V8v-y4?YulsT-;^k@PcDiaXr0BF z68>O#w+a&6>|@O1&h&G>@KdaO5ykPt>+?Ib^HN)nZ4U+fy9k*vs)8c#pG{ znHu5XQA-;DwhYl3U>&~2J1@k4M0T6fDRR(%j|j6GN9A5Eo(MZP_%>$Do8^IjssDLh zO(4-7`uHdRTWnSVmQEP@sl$UFs9~^+51iem^NG+6H^gT>>kJd)Kmu7UZELBgPQ`F{ z)G?`Yy8a~QGdXUL8>06#C6j(Q-Y@fFk~F_0-sLP=agb_4V!PnA8I|-7K=}6_Nn;$$ z+=2pP??(yX&rcz>e#dpDPrhflo&@u*>XkHPzv!|zDWbV~D&`Ov&oID=JQ3ImQ;!jKN$ec1bm1|e3ZAH_!b zIk~wE51?R&^Yi_jEZ=0~W!`}1uJ~B9F-mhpLuH~1g|#V;|K0AuhG@2{F4qpio|)o= za6?^BnsUso(k?vY)#n`7f%j29+fMxaC`R=YrLrfd94{}jzm;-3{|<{XxJ{(b*8U&N zq@CmTB`J53Tx=lDIO)ffb}SAa#Vytu$jcbB;;7wX_M-Py*DX$|Vlp?l!ahU@ zMu?7x+@wX;BRuMff|d-ft2hSp=+5huBsYjr6nHy$mW!1r4~iZ&*ohnq3hNR<8u(Bt zslV{kB{>nG@#%)4JGF*q)~CV6mLTJbel{ieP56kML~a%=DI`zF_Btow$$O1rgiVtr zjQMcE4`dKZXM|bk-oqbw&HVuw)fa;~!B7j24Ua%A9C6KScJS|Qrc+H0aZq8RPVkyf zan*MXlZZg%DZ|nS_=h^ZtVi4fkWjuqJz+!#Q^?{|t*y8&+kh^2Gx05W!AP@IVY!|t z^StqzJ?ls^vPr|Y)$0x3Y{%1d2xsD`eQRDA`WCS2*e3#*THjoBL|RTe9`utuq^{%8ia>xq$Du2x4fHnW)zZ~>lly7S^EBb3TQ=7N5 z)k6yPq94gU4UX1%&UF%bI=Zh_Be3LE8pV-M~bE5{{NkaIs{AwY><)9*qSFxYmd}A(X5rq*?p1eie zyw0`!;J~`ifPqS7dvX8NcH{bsToyum=zv5{!p5`qXa|PgReyJsAl9e{;5qF9p7TbY zOr?`-Eyr|D3H|9b>BGmqj}JKwa(=UTCHu2aqu6|1P;#@d+yPfN*OTcO4H?Wp@~XMV zHxFOXmb+RxennJP%NZh+)Ld;hXfB`ZZL#4%@8f?jd}Eji$&&$|Vyzu!_vGjIE_1l? 
z`Pe2+A>+z{sosC~)9;@fPLwz-X?9QJNm9mVdYD2%;Ovc*hzrugMOPCA#km^nE_Ny@ zT^!+RG0E|jYR&m7lep;Yub6}7GlZ9sbBB#4I9XUHX?X`0gkR{VA-(^0>@%7%Iwv2M zVgm4<-5Bo!`@VxT%-6S9+B#aW9lLLw(?D^;VwVa1GCzRi+I^;y!qZ%?W%xwd|K{Q8QxB5cmA2AQ;trRg?HC)EMse!V}wAguOM-BPe8qx_EP6gWgsCC@Whp^ZtZOM#0lN z%(Q`qP=x?J5HM63M3rjK;V?@)=d5tp`74Rf88oBNw}gY?L`AO~_Q%!wo5}5D@;HW= z49Wnjp<^kbbMQ-xo4da~|MGeev>X4`uPNNfhuNZUdr zeL>vzd)>I%wPW!X%o%A+h*H_1!|{@E99G*8Ak&tgI{YZN@O`W-`Y~im{GADxuPMy1 zq==_NOuYqDtehxc2;xU5k!c)z?InP5{xH!DO)>t)uzcuRS1P)8^0eEE`Z0Nq;R#-x z=p0H;!>VDL&OxUD8XKm<{VqoLBvUV6*UyGDiDge$CUFry=?0oMa2#GH4{wTY2T z5@fHCLv39aiB{)_r?Yot+Uy8~dWLuR$B<_;s87+yawu8j8>U?S3(M12!Gd-((x~gJ z8}&3rX~SsK!J)B?pK-)2nMvfw=9x@uHPu6Uptxy|XAQ?#_gDJEqP; zzvo^|!T6`8UEv@SW3;2WXfe#@%JgpfRU~5GQbpjSze`S+{HFzy{NB=GX?|l;sBq7c zVgX@U0V-gZ!%#ZI)tGy)XosdZ!_$ zKMp#H=T2uitSOh$Mx8U;I`<-vzM;_#sBz1zMB#l-y* z7#LftjEF;_iAI@0S?t}G60z2c)v{09wi~SF67e;In?2gRS*Y+vh1j+u-pO6IP8Y`O zft&~<0nkE*8y075Q_+!TRj3u0X~<}P!ZhSnYQ*gnEq%=iUTw~*8-KCBn#2IWxbGj1 zdVn>aNCL0}0?vG{EJzc;Q|}*)Z|Mm&T2SI|uRa@An9(0&zxahvYeuuthP#jDyesQq zy(z7k?ON;{-BxFUyg9~Zzxc{TpbWQFc$d7+sU)=z6;zCGHgf|jjGmgxs5-!6;46dL zDH-vupm2e3O{Ju{x%NX69fir8rvlw?JsP?DgL6-y#NxH=+O6gXM@Qc8&kWrn8(e!V z3?^h{1+{o|$&m5lxqnRma+7`^L!l-wdRj4pmMk7IHY01`E*rV`lrI6!3>y^ZeL{z8 zC*82S$Njea?F58BPNY1D<^5@3k@Nk-*1)-cME~1ju!ZwSoFC%$ZzDm;Ig~~-42VYy zZ!ky?r?gm6Ls+;uQMMk@5Bn9O`{{!-GvvcFd5WBGg({iTqk)qRv2YGjzNdGf{t5%& zj|tk_D5Qu%SR^v7E$L4m0#+?z%V0iV|j71RjEfV_aQ_%|`)8oYd5nO4HT`Hd74X>QA zmgY3dBJDqUq1@&;sI;>yGH)?%Y_+HiICNtH5tYX~T?Y*VN0%I>O`5s+NkRTJQ?V{z zkGJFA%+Dot?Z3MrQ!9l&%1ylrPoUNOXb9gUpFMfg7iVfCYoHxk@1=Mgc~J z4+(gA_PB(Bgl!<#1Voywg%1DEk47bf&Hv(J{)r+yxsfCfJ#&>&@ zrjnM_0Gp5nFwefCTem@t{CF@8sEm`^4y+XQ>wk!VAEZPirE`2NuuHnLZOv~p+jO&O zp*(Dpqd>($J;~_3)&mpwIF#9@C&VOSbFKf}CU=%0u6MXN8VG<=7k_6EE>+lHsiE1n z-~SK)#AZC`tXub407t751J8X3>MmzNOx$SkT3p7p@BuuFe|NbaoV)bpJW$4qQV&=9 zZ_mWB#x`mGce|O-7{f}XLkc6i5}_~LW2TLn1Jw!Vq@HK~;fCWT-zk3!nf#j@PMyU= zq^~gN-`m2&6pL_xYsiskyoFU2?q#slpshMDlb^&et2*!a{WRu)y~QRaiOE8iV%VI= zSZ#ZZ;&%^^yMUFSz|Hi(noREzvs|j@aeJO) zfZeDD;QQL#J9&U-T9#bysd-d|rBqBs;K*PDUc9g~oQp_(=E;AfCs1EkueAWI;+`CJ zu_>0`2SDNeMI~zz=L|K^2qPbn&(+oC;Fm0+gVyi~DfT-97ZKapl%)a@k({m3;GvmI z^M2zZR@8@O96FZ2S!MBlOUi$+%A$-?GyNZ(l%rOvD*nvNmqt`3#*nHY=4`ZG!hnZ%2(|J=lC_S)MTT%Q2$garNK@e zx|DadRe^~xO=x56Eg&=rLmRO5dL#{vcmz#wpVIbPqdyuA9A!9J&N~g($UZN1{aFpS5+AS3O5A%$f&SjI!|oT0_zdUDer=5Pjz)QG)kVEgolW+ z?&y9=*y8V~6u(6A23YMs4wrwfd^}vh4GA)BNoSeWwv~&@zC)Hus^9xy<#N1cz;eO} z$Y98+r^VP>p3|i)-u2h^qxHCs)K9D6$R0UiK=1-3p2At**ywshu~&w$NH$U+ubFED z4(VWS?K=+gy7E1B+JKBq?^RSLqv4xnOW)Uuq){kT&$VSCh9hM>^R5#?>KROa@G}i5 zTO~Tco{-s-yyJPDhXW57tV`1<{=3^5Fzg-NEC?||o;0TRti!Aw+K69+RWoSCu@GLw z<}ETN!-}R?DjRQrU30_YL4P3iO$kKXFS#-xh*D;`u30LdII2(~GMS#&B{JLYIVIJ^ zz^8!E|GN^Ro$(&QJ6XgNl!)+AHbZw#YM+$*rzzTJeNat5Nxb zh1To(L0)D~2Jxc+I8_-2GLx>;b?v@*Zu$WI3~eg-Ru>&1W<95|7;<+=fh^AZ2N<`< zlm(6Wyvi74TExx!VP7aTSV=)Vly`)==^#cw7XKV$FS<<1s2J3+M_zQ^uz=d>uEjDc?O@UpkORENASd zXSZ;Vr8P$`A!&HpG*>-!A3;M-OJ}HH&U&K?@#SyP8E)NWHC@8cWB~RdIPg%&m=Q7# zYyy=vskRSb|ood1UgI+M|lNZ zMiQ4exE7+sA97D6mZ8Wr;5F*qUkvk^3ipN7gW1n-3b=kNyAC+fP9;st-WWfx7|%Oj z)Ac}C_0bYK)x)UBd}XbQM@^?;eyntD2Au&x*U`XJdno;SSX|+p?h;O&stzWft{?Ur zg7@6_Wo6N!QsQT;R1+AlnD$3QCvTZat19h$7 zWb-O-a#gSh!Bew20 zPhGPm2mTM&*F;QZ9pQ^&kSUULMB$3|+>iBhn5r1*)Xb~KHMUTx*O{j{+Tq+%X~w&> zlQf+dG(xgy%;b;t7lE;=_X8>(!6bfICs6O?!p;#h)K;__M>eSBuJBrwG~&-k^54<4 z2`yV6oH+8#e&5UBUv1>@VOMFy?ko~ej(5Pyh%LiH3%HR7yF{QUmAOU*Qe|7TkP=qc z?xRbD7A-Mf{LHc%UeUWl7&r&+qaaM1##Oq-wb&~jpUhZ!bVxsbIE21uqFBro(AU>i zL-0HOUSxq-1%3_&wB708nHdMaU(!S8a30l*Y-Adgw&>;|jaI?s>#N;k_N1|4Fkf&u zI)@%F?`?^D#PMf}U8@%%xyfCAafkPI^**3rxx)M8 
zd-SmPgbuYAwuATJs`~Zlggw!GgL<9L)I}iXpI0G=Yp9r7%5C@FlX4e9k4W=xpD|nZ z98GBw^AVKzrWz5n-=-;<+R|vGG`HWb?m{QPF6Mvc1BrkH5Nfh<7c_YNDDD;l3rVYH z9!9ymcj3%fWlo{(79kr-b<+_=g=<~Q4r&XTr)Xp}X{mBz`Gqvw>S6}ITgx2TI4L70 zaW9o7%MOK;R! zjDllW{@^FEr5dRn)--$KE*yRt?A;|vAky$P(=U7^SYDGziH4?&N0BrspO6f)D0>HH zCHlBh!S^nWJs)^WjeJ63^7>eix2byzoyaUv<*Cl=*WJOuga52Cd|NnuLZev&k4S!u zM;QG>FY(9mcn}3W3Y6JQerZpR{Fgdn0vfHpk-2esa|Il?#c7Q!xkr3u45t7YbCXJV zU0r9?7Q*2Aqnm-zZu`_{QfAVovx&%(p+P$#{wf!1KQq0(#?=J}S$de<3*xHjM7-Q% zz~Hy{ER1?hbdJGHx@FMo+LxQ;H-Mg^UlKkGyitVk0ysHn)u7D*@19|cztt1=H{j?q zV>gabW3D)!0iuQDV=0lwU&{E>%p#c�lV8w*-D9Q}2L{SeC^kai@dBI7$DC8kDjL3bC}Y2wVJMa{yJ|8sA|b{_VwtXVutgYm z5(b3ol{yjA(0UMr7Br2sT-X|~(Ivn8l(Vq8TQSc&UX5~Db029Ha=j&de*AnwF{}&q$prXlBni@Y6psKTyOBWg>o1RJ zg=3UBhTmSUYKDDz++ofi}au+$dfd5@HEk3s|oKp6u_=?B124J`KcOh83{X9 z5B68GlY*_vkJrw}#oi_$x%SIPt8kS*nAg%TG9YeH2=a)Tw1we`3j4?ML7#vVunNmF zGtN;OjUhmy6IB?g*~N*QnRqcupw#TI=1=l<+toi9=TY`eN)Rzl&ejr_o2E?Ry9DA? zY%E8r`(qh&+3(kg6jdFmL?{7vU)?oRH}OK|xTS8{3>ta4+xVX9AbF>r#7!%0EW=%vFkZ2{Bw`Vc+>q zbg5$89d+kjs{8l$0*ji8*1!`1WTKxz{t_97mhVg~{(!e35|TFnXWGp0-rLjl!8P+J zBkfn=_!AB;=Dx&7OJ5!t&(gL=iE~ju5?NWcg13S&IB>uM)F!~g-6iK@FL?`c4p8>E z&}AU-o}#8-t3Go&b5z(rR0VZI0sAV>_8ya|5Hrl}EG{NHg`|=4`2=iV`5+HWd-=$x z{9m&lY&(E>>s3?TaX=+-eN}&VikA8LV~OeM)Bn%~{hkr~ujDmEF42_7|O`RTY2+=ilYm$6tMJ`k|xM7 zs?}J@iShP}6h;c{VxnX$2$XXp-<^G>SY9);+HX4h)L&G&R<=V*RenGvI}VF5Q4aAwtB|W3z6Q4Be}z ztN+4J${-_r5UZOW#+2%!d=p=v4SJ`SM@`m48pcZWvIRcKoR8#hIrcH)=@B-KhLDp- zK3+VWJ$(A%alzAfKZ&ieu_WM-%i7}j?7BBURKbFNOI^vt`f88Opt!BBG-+abyTtBf z-u6_`R%0}$wAGSOM#K83x7CybE`q7UwB%4dc~!*Z>l5JulTz-X0t>o;>VOjKdCeba zv``|)c!gUikFwv5cKJ&s9yB!t|LqIp4Ug~{V9E<|tHC(nfMCk@RcR|&u>CvD`tjBI zc|QyF{dz|ECoZl!Ef~EyNqA|Lk)M;sv~h*g1v5YHk30*ePxx<~u7Paxtf$>?ZMK;H zpo{3xVpn|WTG44oIwh2VPg#(9^P>}o?<9aMf>iaz$mE8lcw#;ua^dz1%&7Z*#*9k+ zX^hqM^+&C7Z3O!IDPBx!`S*CZt$Cm+kD>7~7}Kwq`KpDlBAwC5QVv_qBtjl9;?Xw1 zNSH@NwBdx<_n!O{a&} z#v7z9b{0q`yq-dksq%2Ym(Sflggp6qN9YbL8vM*>t6up9+uf3T2x?#b=lj*x&cVYL z2GjKx%Jn55&g_j=LZRYnt#6a$f>XF@&rcM0DlL7zI|<^ZVHZ0uslg$gq*51lrt{Qj z7TV2EDYEFNoZ#-KZ!Gx3n%%3`zjl5u0<6?nDwvEmYV-1FWS_?g&Bhtat948yT3ZE$ ztZ`IK<2ZtH>Ad5uTK?plwTGk?Bn6WC!Wg)xy2s6SO>)h5jyEaA;fp(1;Lqa%R#Kd% z*=&nezuH`Cpi3v?pzM{cL(Mc|e@kJhEK7ZS)VAo&D()ykh%L&c7URShQe*$K-J(1- zJ-$^%^07Z;y0K``Ygpk|e+IU;V9T`<7%rZ+m~8>F!mPMi#i7**tHJCpgn(_by+dl_ ziryTBocWo9MSsWm)1s>Q!1|Q!42!82XNKD@%44AFH4c19TWz{9vO!*}UM;~u2)39a z+&Lcn5ff9JOF9l^k9&5jBXs(+o>IDubMK7vXK1n7K#|3HG35H=s)e6gGfj+QPrcE~ zpdQ6#(0yE2$~I{_;~0{%;W4`quNwlT?GJBvLaAq{%Lpn16$hcQQa7fl-Tq?fA1WV? 
z-U?KRJbpp1b+V8*mE#141|6d@T8<+c#Nil5h1o=*1{!sXc1Ch%`~ekoPkBRz|6vc{ z`mTVV@SY{(Xh+$hPb7h7d(rbzp-84BC9rp9H9zvS$InkE%>5`np8Y!VH_Gh0KRoJW zJ9~MzsAe++kQXwHeRHDYQF`+9Ub@otO83bRx6tpmd)0$KK&Rmno->EW+Fw3znQ08Q z{`D$zd%X?E5CA(|I)}#uz67RNqW0>zA z(`pKY;n3}k%ITjr-LJQHTT%vkAYgbPO7j(OmzH(rMMXkje8=1x&a|hSYc5D@)??J` zbYN|Rxpe8|`u;udd;534CR(^*lP)o-L@N)`&vuR9ZsHhUp^y(nJ@)%CJXC|QuijSP z{iVC%i8 zbeQ@?%`D>ApnQCY3Q{pxOodFi|FT-bYs}T;HFCJIvJDBtsrkEgXmcbP^c>JI&3438VTymhl=hwn5D+19gxUV>h)!s9QQ0$Yu}A!}?gK z@N;YPP$*fKAS0A$iAt<)Sq!9^Qb1$E5nqwCkGcsXWU~@?NVh}UaM4>DkwiT%rm-H< zsGDEKLqJ)ys`mbDz7xNIM~8y;>;mtNAeEK|^#QZ3%n+8{nwgX^jx@zRJO~at?wX0( zx{bQ$0=xl@ZTv`E=(O@@myvBQSvI)aaD}J`Yb?@fUhr$WyvO^2a!`Ggf%fr=3zFNf zpM8a67b@?B=SmiV>WA0WgfUyw5ALZNip`V^2dNFpJN7=^Ki&Anrw>9SXCy2lXx(p$ zbuUFW`)#u7?$gvuy7%aa-$=0QjpqE%E?H|YlxcD)AC)R=%%4~hJMNmrsoUBOv-;}H>b4)YBaoEyIte!AjHo>6uE6vizUxe#>AdzO68|-rah4xGd)_W z&Ee1CSHRe0x>M}RPd*OeCGcw=HH)}EmrBbaWI(3p&lN9&vcn3Gvm$T0Zf7kxynLMV zLat~v)*~(Kwr$PuFGxHY;t(=$=WKm|>?O{5`QjV{wrLCc%1fn$?*vhP7|!|bBg}bO zXKhB7%kU`dtSvDd41c~w-Q|Ii@Q8*?B<0lJ);K5#52vf~-3Z$luT;0z<6~J$m2=}i z61*{1-iY{rM16N$6J5|QAXO9ugn*P_=+Z%Yi6B)Gl&%7yqx4<_p%-ZaN|T~UQ9-KG zArMMLN~B2d0g;|i1BB$p_xtX>eF6gX@~xqjBF7NgaVVNOk8YW5*4N zY6YQNpS=$Th%|7*5~PO?j%UTAe30F+%MDmJoOnUJ(juF!-qb5$5!6)qphy|D0TgVg zIv!rQB5gm?Yec_AK!XV^gbO(F9sV=%zyco4LQ=#VDy`qdE$m0oxD-5L86njn!l8jg zTG;t`i1(;ApxY^DMe~vPCyOQttAJxO?_s&A+PHkTcZll>3$#u1zVzQ-jLI~p!=hC7 z+UfT?H+Y(fofjX%FB&nVZR!U~d68(-5lXX>IFhT3^*^pQ9 z^dZEomQAs|_9Fc(CB;AnP@QL)LaV(syC&H>r;(W#SigUnLG#3Ut^TU%kkxjr^|lzw zYSVi)cZV_w&jM|H@v%TRoss8>1U^~9srp&|-3~f)_ZgIt+^4gtN|@j^uA?ehf&^oI z7GNs?PwP@VsAs6EZ%c#hkF8@}Bw&(%VNZ9witpJS5}VP6kqS%iKGuH2!tiGgNXiwo ztunThNIxd>2HDJUf&+%W+p>ci<@SgkU+1z2oeT@#eTbCep75L_INhv- zO;c97HBk3DyX~(SoGijK!oB0#&6iV#7RIt9A)H=OSg4zd1)NTxI za81`AJ|5Z!TI1OaE-p_Kl5QZiZLnuKP3lIi40pN0tzyZorUf-2Tc&^V>ygXRB0HZi`fXeSNRrwXvNaZSsL(4S!Gx zg>8YxVcbj5rLkz#kII4HPs}F)#^Cd}qibw*bHA%X1UEg~u{t4JWYSpKN7Lv(kpd6d z#uXbq<{0iLT+7&3Xmw}CZCG~|0lC1lOgaOe&t9vO!nRLfF5-#3Idb)-)HSR3#Rq$B zM|qZ}ru;^}zbNQnj}l>|VhT+1MWwx+SU;0?qfQh!gE1^90Q>wC%A)wq$(iVZ-7e`U z7l#Ys*}kw4)4RDlN0BG1(i8plkp0YH&nLE$i$CSEZ9bqRZ)RFcZcZ&dDX=%Ewo%Ws zCPyMtK6Cfn>z5iMUnw7+7N*gy`Ycgc`Ms6wD20|#Bbkr3c%WRFs~W6P3&*66x^%~| z3T1tKdAW;Df>@erEkSUXHT>$hh1SzZ3G23(rM0{nta(Fv5H-6(eL_haH-Fwsp)hZ? zp_prYEmah(vQx_LQ>ufq7V2E0h$(MA{HxV*5*BiOEh*9R5WI6X+ULboyunEPMd~#5 zn8+eOiO^oGJi7;=o`~CpuiK{>O)lEu;arL%-|g+b$CAq zP(QHl2Gc|4IEeXK%ksG9a0W11vSWW4NW`gdDf`nha4C|7o$lN={u-<+eUptX6SbGh zsn8wz?9)ITYo$3g1Uk4}S;KaJB$_K145n8vR$wH>E)VJDfGAw9-q}l&3WUnOK|E30nRYJu>rBsomb$*ljXrGZ0jF&pvT1cMH zTaG4gMQi5VK3xa`&j$p#J>z;Yb0GN(Kl$gMgVu9hVOf*Z1Q~pt^KNNee(U$`F4lim z(~4v@{a50=7eh9>frLmQ*OAyIVv*!Tfxk=v?%h(CBrJr2D}i2))WiU@zp9n0v!@rG zK)a4X!JLstLpEO9MuCN5&v8WTK9Q%rlNb--f^m^*Fv0(Mq*{ubYgx-paJ9Gne$G-7 zR*A~Qd2lxsz=|x#21-}>MdP40q9gQMu!F+|ChV9)uiCx-kl~hNt#=`x-Xe7VtD%LN zZlD#)KlJ&c;UJ&%@D_VgO50K2cFXQw;J5p*uJz@PZ% zzD=#pwFEXRMxL`ZEzmrwZY#4oB{pbpr%t_!Z4D=e?}Mqs8<8-tcb*L8sc;aiSelSz z0uJy{!!i2rd(9K+Ok6nSheirvg{`NudvI^HfPfyi)Qfa!0j)M!A}`zRqp7#l8xY@? 
zUgY+cIL%7Gb5mb0QnjJ2zQp0TxkkuCpPCk^6(~)k<{C{Gjkn9a@RWmFPdJA+8h2#Hn}G7 zP51*^!E4fB)oTQmb57cglTX`{SqO5=Cv%<-mzXTh=H}${teA5=UOYCPUeKXGC62{N z`N_$5+ir>^u>ar3N$JHz5bmx-GwsD)6W>;F*j}g0xaW6zo0@bCA2b9+ ziq38+q=~EAwq`<*sTT030ZCK)kcP((_#T0v5viEe%A5a|GGa<0;i|r1r_r6{r8s2b zJsCif-5+M%*X<=ki8h7T*sA>~2cYahmE&f=Gq|S)BdXdBWpr^&5vrI}7N)WxqA3_= zx!de)?=h4eKcXS>=#AQX+HA)=<3h}+E*#%oTpsavt~Y8r>82OHKB>OO6W6?zc6#GV zc`3zGWisY_e!X5Fc2i3WQ@mrk5*EPJX>P)H7P3P0Uem+$5!LYMV&eO0VDsY!7ZeX@ zAn>LaX^DTO+R++5zrJm^J1f_n9%bhK!Fd#9*=&1z2=%jMoXKxv=ynNH5If1>9BoRS z6$UMTZCd09NT88$>YB9hx8f7+ip(~_JOJ1D?$@{P^BxWe#q7V;`1W`l2yi`Dzt$0U zT=Krby3Id|h9{V8_B%2zKWt^MZNEG&-yC*EKR=5%Z{BoneC)??%2CtVa};-TNH()W zw^;Z6MWUGROOtzTL$DI=jphq0=AL=$G<~jju`(@ACF3`;3%|Kk`+d5eF#_%$WGO$K zDq)!^z)vNG5H3|-Oh2q&GKmPvSL)Aoy@YPh&}3` z*&yi8qDA7E(2G$5oPj;F2)OjAHWzSPes@`#HM(&>qAG&V%4Xxvk7v*BIcNoG=XJZW z2C$S5W&Vpv7$cZL*V9y1DA}b?xGmo_v6!##$4Ij*#-P)PWnAFaN@h#W81*{tP6n%3 zj-(pt7YWH+H3m>%#O)w-L*V(PQ2X+wSEva+nt-OGyC8ti*A8E|?e72Ds+O;Z&wDKW zB+LeE^&NF@S5CcWYDv_bGU5yhXEeGF({Aj>=A00cFVdBFz&wYH4uumPAH*ln^h`2x zI-sXQQ8TAT%9$NeX^BhzHVJEzLXo=d7z#CF@WBJP(Fr0$uP|ogaJvTX997j)$@j|Z z#yrt^_}w_DtD?cBCtzx+>%#UF++lYg7rJd;U%_&Su_%(VUSp3cl z9H+edYn*$E`|b6#x&7DYWrLQr?kVg1>ahNH<#=S7E$;86(K6blh#hQyxi?|*x%hbq zajwy8eDR?n&cQ$>=0^?wLh@&7h>G6|ZwM|268O2D=x=oY52S{mmUv{HNYA9F?xrQn zSN3}^z*={$j_o~r49B)d+;d=7v-t_)Eo19OMj#2DAzlv}zFWz14R~hp$!z%(#nlf^Z;PQLbsf)YHuIgu(M-}&+30gl`H z6}Ca7|1D>ZjMZv#aUwR^_P5@VjBzknD4emra2I;2#0=Mck)tw>-t*JHmRyLT_Hk$a zxt5&}*z-JaOdXMqLB|-;Gu`B~e_OC&cL>QbRfP#pYQ{Q)NNLA?hZ=TcrTSX=!C^Vf z4aTJMhYzg{5_8|?zhMLhsn-Ed3%XXoc>vZtWc_(~1vDDV zU1;_|F2e4%O0}h>spGq+|J`LpC=|Z~p4ohWoVZhL3)OXb3xYlZuz;M)1erSDJ>pXD zryixdqkTlPWS3(jxNx<^O^}Um_5FN!8o$(@stcv0UtLckL`9l;hCi7j!+ow2M+S=T zO24(rPN;Y1=4iX?gu#7{&=8ME`RC>m)`Z-hy zc72OrRD`91rH|X*`_s^WiJVFZ`1?v9P7GR@dUBHV`IIl_=_OAuC-V4Zt*sN&al&3{ zta#C@>2{OeIotiyqdfWnp8emSrYasl<#kRk0m|Wc3V6w(q|hnv5qsYC=YfRZp`be_ zcy0oLc6ZIo`_UD&O;Q-&!D&+qaI(tgkq~8K=@c8Y_Xc>8sIuSAa?5y!W|BI~&%p6#xj|j~#^!re;m#WQESl~v z6Q{j;`dr*$jn`6yDp}+CW%7Cwi;yyIy3wgJ+n3-$Y8Rf#zHvMSk;LwIk@xwNup>b1SXmO%!M`Xq%xD8P;>0KdWXL;w-)3UM$*rOApjBci`r0=6J%r zqKn&J8U3K`EdYsp?;oeg)a{Q(zVs_hRwMQZI1A979FM2#uKdVYKD6R%ClYp_DEiYb z*XRWU$)+0^m*08u;;Ja7>+Ulx#zC&QUmwdBCwr%G#eJTvR-MsyK{@if8+mkkxMj;# z+dkjOvWH}BG@F&$Wb^=vwKXzFC-Na8h3%qhWQ!m9pCM}8kxArJS#os(`|?K)G9Z*z z>M}g?8uc$?m=&^FB7y{a~XOsT?pN!j2^*pYdAxTPM+*wlW-Ad@;G} zyc%vbaKYMWAu|P;6~j0Vw`6@fLzZr#0c_GEcF(;ZQFYq90(4p?J|>)T*bV7fm-A%; zlI#r$Ai5M46i4@v)sEEajA4qF{D8d<|J(`5lmaF3P7iyN66GGx;w@IyVcObp@ejXg zk*v~fA<%rgXcgO)@^Z9-z6heqEZt7BvmICG@n(ARZ66@&%`x!oyiU$QuXGHSAAMJk zn#{@gDKto{uCKA00T;|1y>Slk+{@i_=c1id_66P|*Vu!~e*&}sHx3jPz$0m@Y@M!#N)1D%w zn$Ftmhn)PrGxijGK|VQ3d`TV@mIBwf!~$EB7q%wSSO!9$*Q1^I3llnUt^DKO0Aj3& z?8?WGLEbR4+f{QXr@6S}TT%cS#Sv1(ABE54a}=eJ`ben!u9DJNF**==>sOVTTj~)Z z$^QQPPE=P#J9_Ecwp8Dfu86kdkd&>-n_O=c^HA-Y^1&q<%^J+7km+PXul^OQqZ>GT ztj;_P8{x#jDUM8c(6T1kGWJsqIFUlu0rvt;NG9yLj?Xj`73AR7i!jx~jEm#jN(76g z$DRRype<>$wvXLDyo38E#}LQdOq?#mipAdG&T#?wT2kS_rBlL1KWY2emeSFZcNcF2 z{AYsB2w|XO?8SqW)KxW1d-Fz)b)ZN~70X%(ozRrR23q9G@wHbQ*xoF|5+db$QPuEn zGW|f++xIRp>LkB@_@x0!9+>2eGGx}H0PFQ6Fe+e%BZ9^)F<_n&u&R@Q`>%yJ`1xPV zq%^`@J(X8Mx#7OhouKohm-V~!d^%mfDiVHR4d2N>T6KT|CyE13X4)Cq8Z=8;a2aG(zqcy zme(@LMEPycUblHAqf|zy!pvS}Gu8UIUF^p$EpfUJb{==-)xA#5!suW6E(Y8xGOKMK z&m-ke?_*M~%!ey<6$X*d`_OcGG+p`TPW5ZmJo3*lxu?e1g>cZLOfXo|BvmewNRPN9Pia|u&2c-xH%Tv|E z{bq3WuHP07Sa}1NQrI~7idC3+SHKj_sZg_m!qT;0V$BfoXJp@8=`cBS(H9{?UDn;< z9^ndYD396Ni?mbVQo=~#w7pi7C)q+6*-_|?_d&qO1Rm6; zO3%t>PV>J#j*Uw0GYqP8E1dNKJ@w*Tsq~FSRkLfDp(y4#gjUc41zz5P%+#FFuAY#eKgz{?U|SX(;~Rms(}jvWmdLa;Bs$!t_0* 
zt7qSCWKb!!&?I8Sh0l_HSn~(wg>enhujcIE6SB|UK5lo9(W?JNSCad-&z`DWD85q| zvN12I@P-}&POa_CZF_ii%O($3#1%Ao@Rd5^W4Qv$(^nNDv1)tG@Djst*TY{9MhUBE z3bK=5aYRTi*T#Ec8Qu2WTw4%&^YkYVrSUF^ff zEuB^rbMnnu8}5!p2VS|q5AOovcSMa7gy?%O|Nc6@jV3?}2`(YTu+zc2d&Fe)Vn$Tw zhS(3j3&ANlzPV&c_*n9i%W{@4KH0Z#^DX8S?2hlekUYQRLHLj?U4VKX9rD#VB=-#g zA5KzhFIPvj1CT%%7t|Ujm%?=h>WjCfr+0M;P3KuSGlc2XdM%uPZzl$81Maiam(e=pQkwQK&OLV5DfPdFp8(NlRc38KYonxi{Ns zJ$nkR&FL+c=k}2jmJjodIU6mI@i)KQ8=?y1uR z9t5-I{`$_mC9M8VR`Uhio%6&WO=FL7U>0B2K2L8kQKw;S2yIz<(P6o~Mts6|x>Uv1RHgfMoW zQgo%+|FIFqefl^K2s@EEz5h4<@2@4Y&g)}fA~|Lu=x;skasrSn;(P$w-yFaar!TFOFm;CR_%he z-BU(nra`dxloA*l ziBQ(4qII4$oRC&)4~33{zw$()a;5aY0R+pzVom ziwe8Ni3w4ab*aiB(foF|3WB-?q_(~X*^Nt#doYD9fJYH@tLSN} zPW=&5Xc`mWE{vT?O1bL_2qbbm$z%J*DY6e+Jc0cnkeX|TsqF+vpPE;B_LZyky9H+# zi*;*hAYurHO2__p6x?QJlb4SAv+;5h!s8dH*)}xNaS2AE4_43@M%8)IXDJfMor)g{ z#2zh6Xu1$6a6g(&{P#5F%QVUhntix#^+@kU`e*&D{@PzQ7z| zdz(^{V#^qsgGD1vocnVfTjbf3vFrxN=@LU|_YVy*6J$}ttl3KmR z8DaZ-EnojeU$B{D!=7_OU^nO5-G|B{!?bRx^Vz+38=WparyDY;?*7q8W%Z1ORiQt+ z3HscL+{IXU=40SwCnEv*gXRN82RgT-P{qPCuXS!CT$C#2CRWe|Tk}uf>SrdRmn16aDhP=yR7K@6|GTa$X2qB=?Ikb0A?I>7Scaz0>0gXgiO&?nYz zE>f6-MUYr5bNkvY<3Dit51GylL%sGQ@XZhqsQo$py_}Esw-NmG9$~+;E-i9itz7h) z3(oXl!=<9y}W-X333)`!>*~G&9-1-SrWkXLd?yKR=2*YgLYbhhG>60V` zV`c4c*1ihc)B~+pUClf00ovHz0>0aip<_k8X}O z^3e8Iuq(x}mqD?pkGC{MjG0mH0o__oqBVhlg*-`UDSsK@@;u2)ViA*dT>}veDA{|k z$Y6bUVCuyJtWJ2$>&El7uGpy3zT6p+YXL+frQ;xfqT7_mi)Dvf3i=sKULD|cDa+Tp zx4d^BH7Vmrq9%R(xNDL4MIEvRose35F0p>ugzdXPS;?+r;%Md20V1eF-GB2xK5V~5 zcTxA9>Qd)d+C8sv9Y41>)Zg8v;_-NB{U#e;*YcYONpCWA?$36U)q~GB;y7Qv> zb~$YHy~Y*Sk5-~@yGVjB$0hcL;r^xXehCG#_Y)Y_E3UmsefIi>2PT&MT^pnSmKM9I zMLp*Tm1IW z61UWPTm&L?Q!J2Qa3YDQ+3d#{4{5?;c&G`OG;av_@qh<+2!uKNN|w}gc$W8k-WuWe z-awzEa%6R$vPqZAeGu$+0mA1*D%7ivH&ZL;9mEesWalMGLBL8 zy`&aDaOn;0l2{%tnx5IyA>^%vd%#Pan2r`8b)t`Z%O2UPkGYD-rM1_WZ&21?+or7Z z9*hYHR*R-Qv>Q4kI2!l*#R69>*dQ&=&#brbE~_xYUoEZtLFvrq!q?MV^(4Y8*0M}W z-M66XWa{twClX$3>9TD_V#3g$Y0hikcs=vQT32nQdP(e%e#wvy?!YS(!x?5JE5P#K z(Y|zwkr1fVO|aSsq<$%PE(TPWP@it&e9)siWuP`H$3k66puTd2SjvQGo-4@s;RdUS1^#=BsAl3S&*DVY+g<<~`F^GZz5xkIo?WJ(Jc-+#1Q745Bw?<6Ta^}* zvKzV{1>C*_oI-%`eS%4w%m}BfT#IvFK$T(zO2r3?P}XMT<`5fRM`G* zCv4unpEA|ED)v51&_f#Z*TlBbW6F>^v%fD^8zP>4h$`*{_4w2}Vwtbdn=G{Mlropo z0HMFSB_$-{^IZyVW2Mb?3TG|yFa7YNA0g#atr3@@Pr=^He0NVSFHV(;Vd|fEE+9oH zNUo-4{7Mrcry0P;S>A1-D8 zRE04`9QOg}nLZ-`BsQjOim`?FH5Z<1FF&c7_ao{jI%xA1J=22XS+b+_+-P;8wSQa; z;WC4_zpINzPLn;T?J$r{dT8sls(g~YN|N=#|3yyx-Wy(0*SZQ6B_@hHg%DEUJ`sf4 zmOv1Kx#Eg-H>BPnrsO(u7^kxe_y^PQb;l4xGF5O{DgP6lfT1l{RrUm?$jBne^tCih ztHreve_}jW6M)r;n4Y#IpgO5wtx7ZlbyJ;9zZQU6x6iOO4-nPQCQJ&uGwb3i zcQOXEo^Q{N-(jBt5+<9kBm0rW6_{rw&m^F==iSys`IGvIEnv>tQKh2g zB)y8|G5ldBD&!FS;{uC0Z*faiME(x1=vwWF#gH zE~+=iI_8J+HR!ao6TkUuGoh<9V7qM~VvN&E*kIA~q(JF$jiL1!VS9hq2MJ{S`mFI9 zfu(oh&QmnZ1Fxt=+6=Nou|6z6&o&^oDUi}X!|#}=?H7W_Q}0Hk4BW;*c+~Ry;jrRA z`TBZP>O?DTUogyCdmYoXzf>iz<;^_9`$_>(zVK-(=&PuTzTv)eRI0XpIPK~)zH+K3 z!vXdd?fjK|(8ZJKK3xSnMWH^;*xY(_|L1$CV5bVu9R=e3TluTGz58eMFJHWJU|!ii z!+8Eo^CI=q>)li#;H3McKi|Ur=vzLmYUAbQm&W}K^?9tL<1Jo7nwdY-C9PydN^rbO z9ul-pI=HQn*>ywyhU~{eTi5J!_dRnc8x{dZ2>~U_B&K8LdkJokB;#m3CLsc2f8Z8e zEY?g*8;pHE!;XjZG~&n1LF#Rf7kJK39`XlBptTNVh*ZCn zUFq(Oc^_7c6fW-$=Wgw}-t;XtOywx@yY|up{GnuX;;fvjZ%hnq9t-C`I4WvfxQ5T0 zapTAXPB^K`-tq>hGcQ_HKtu;Zv7tMv{V#GQS1mDESf^!6igecW@2^%9&^l-(7i@!u z##=Qt^m!5rL?V*E=;Yp=t}j%S&V*(K(gBMKN8t*4Y8v>1(lT2$mS??vX;3c3isZ0qy))6-Rp8;}WVj^4PdRAu)iL zWOlGRKBRa!rp^H(&~y(4$54>i=d1nZJAG3~upL4x7eBI=i;J6kwY=N37_VWZN>0)S zCfx3cM;tf}(t<@*=%3ApiW(Hl7xt&0jF%fF0CZ9!5t{8aZ7XD?iMc4f@%4={_7vjf zmjA`r453?ps_#FnB@L2iewnf+l{pY?tK%Dd$ul8qe9NQOW+ywRS1x1NwMlh1cVn%d 
zw}49NQTT!As?5Q^X*tz{h;DhURLN2}S+^nP+@Idl%tTfwVy;E%xdbr zS8@`r=%ggBq2C#+Vd8SG4)_RgEIsKZ2LeE?B!=v01*PxEA2=$&u&1eu$l3IEQ5K5J zF#Xdjll>&!Lt4Z>F9Z1kIluklxFA<>ERW^Ke>(=$f7rAp;tCHR=mhVQ*}B0P*W%19 zF!z9~rK>8du6HtQaB@&uvPk|n{nB)|*PH-3{K)qhff}(#Vlff%GkSH$i@s+V6E%VZ zH~dLxt11(dGhZL+s`VbDV;o5&1#vD+5ola|Ud^t)$F)BaV^(SX$5}H-N$*y&Hnnp_ z;I{_eUVDo+-?IDsb)!1_Su>Ys@qZXH_R8;qcBRdu{#HH}ZT!dyU~-2t4KDwA?xld4 zx+(12@qAx*-L2*LBuqbO-baifwvYhzkmWH!aD$6XKUr6N7^fb`3KftweRI7Ish;sn zfgSGgy_TNvNAodvo@{%))INKczcjMX+D4paSa>A;JX|;Y*{?l=kA)FLFL>LPO-Q(3 zJoI`nv9H66*{UzB9IhK|e`VXNt+E43oMMZh|GK~LzDzu6s_5zcT&!7GdFx4wM}>`3 z1W`JPih53GCOe$4j$vV<>YJC#D%c0{{d5W=2wNUY@J~o%7l`ufLeHb zD=p;zU=&`41)0;Is zdCd7TIkw(=fN$p;_i+(f-!~O?1mpe%JA&kV8ikMP7bd>n+!}%T&wN9qu|};H?gu^< z9;T5znmJfjk=JzZXyRINqqwr= zkvfTAPqQy^*1|ek3LZ2n4-xCBhP-p~KiTx{>teVUHfk$vY68FeMV~_oO66xsy0?gR z?9`VFubzNDw=t&YH0+*~&w4Ug1_g+i1<76xw?V5Ta zZ{fhuQ&g-Hq}mVTlK^gfxlrB^lkS;pSQ*0Be)xoq^Eu|+f|V~72qc#R$YtPXpDThUw+)LK}6WddW7}?4N3Eu9P z+=DmI;>O~QcS!yPED%ItEz3P27>dJB)u=0_&@*Wz^3hcr((7efz20R=s!j~lZ%rPj zwKB4BM;%Ysd}3k0(|A9&22Bzh2){kUkJqT{A8ph`9P5XW5GE4hb=QLL6^Bw4#SKx4 zTS3(giWE?Qsu8Frng#v#^y5rY;fbDV4K6g!ydu#PNN^eagUG}S`1C1k+oKyg(JF+4 z)fx$#D@JhK-FEri<7GKhF4&|+gYCq&B%&wgTyy(kKXPurdeyvH}K;`mvX zgx>a%B=FXjwXTh5tBfC7Wc2m9fuAasHSkw6Eq6?jp}*5zCG{N560~Hu@4p`TXE7N` z7Lz~PY%q^UhWM292obTXMW;j8TI2PdiorKwDYR`-*4e%{$2iOGfAl96c$qNG`G}K@ zFSB;aDQjk}?&qUJ1f_kA(~rhn-by!RA+ZoE&CblLCo8V1#ya?4^*5Ukzk~S=ad%3> z8zh6{ia+!bwMy0fC#%o?|94;SMHZsfbRF*p zE=FkYG1CntLVSyRlI*mv%2G@Z>^sGJ6KvR^)1Qg)`_3D&GF!CF(DLGF=dVI$nq)PJ zqxh$+nr=nERvL|)PLH?FM%+n*!g_BHP?6W@1&K4=hKtrt8D7(yV-7LC*CaH?9ia7t zZ^lN`2k75MyVSb`yC<&2ZajhlSBUj}^_Jt&mnJlZKj-x`*R`~iLh&X)jo@SI|I%Kx zYXy+k66A|Y2^=b9oJ`^m8sQADpEwvH^&$sLANhc4W*9=}vR4>^YANpNDg;GKv*tSz zxbxr#Jx0Aa(P)^(JC(DNuo&wnGt+5?aV zQIZ?PN(=dO=nv7^j%{j5^F<7VGYG}Ainz}teQ4lMius`xNY}LD`_*}42tXin3Tck##Mc0`45&Ii3uy6zYx$q+%Io;M|7mk# z_bqcXDWF!t$bHq!o6DS;{7|+k<#)7J8h=4p(ZF0jks(^!Kb4SaT8g}x+b@SG=wua? z^x{QJ*!3MjKd5|ClxGEMGJZc>BE_yf`x6a7A;g%i`N?c4IvKmM##4G$;MJShb+%Vb z{D7#06A83T%UUUD)*y@@h4qb1t)iSyyyFPNomC;@BHGYY%L;=3sgB@g^(kKIlm}k6)ziu$4i=*y{3B{STTWiILoFlFIZKln>eq9z0B=3 z5LCCLJS86GyC~ArRuqI@b-V_PW_$578ZH&|!Ch1xHtc&{aX%VNbsGLT28cQZPvU%@ zT(g$llm;=&5#ts&FF%zfV~#ULe{;cOB0$mGSxNgxJSz#SgO5Hd18xdI+4!PcoX3*Z zfG=`db=ce-rI_Jb?0&|%-yJ&7<5GEF=W%EDi@lce8;^1P7T-trmz0W=PD3zOvkVY9 zg#3K+5TFXvX(oTZdW8Q_8%MjrI&+8S$<^nHdJ1oySkZC$#^(43`UTiQC`eO}^dWA> z1&KIYbIfDKW*TEU1kyd?$}(|_k{btl0$5S;vgJV*CK1GGwrV45HtkZotFak&oYQ>u zVkGUTASs5bXW~N$IpOi5yO%Y!rL@3>B~nKT z5WAfi1x=;np1X^L|3q5w_#|(|*Y=2xJD0Bd&>D$nWlS@I5lb53W!9CG;^=skUchj` zV_Nm{3>KR=HMd=?!~LVlB$l>$JO(8 z7YLWv^G3ckeEn0orD{KZ>cY{Yv%$J%<8rI<8p-lKz~xvyd)L{Jfm1##03%pQR%?yJ z$YJSR*be8!0hZA6=S-3YRgp?|sv0HeYWv)*D;YM30Pc41|ABPyw zq-?7{%|332EM%|bSu&|NuqYGQI4o55UsX(g1nDC-ymDWaQsDGXoJSDsOr%$RJ7s1h zFvhWbn%q68y_SjpKro}Co;Uw3g9GRC_l+)^b(gN#Q=$Azx2bVALJ`Y`ob95_$t`qL zP1d%%LMIS-p##u6_-+`cwuvjTI^2=nl$&ThxSFdN4zgPK6xFUER@N<$V@(UGItSf^ z{Xyi7MPTSjg(HH`*U|w6LKjx8FYBTtA&_Wfx*rn&)JVdNUFyS3`B`&`fs3q0Uv;&a zSCYZcNCRSqPqVAegWB@2>I-0c`b@T;WN`7}hAv`|Dmq*kY0H4AYLYLS(E@d8F+5-< zA4+uWNTZ_rqb2<|udte>wBAr+yKUuIoe$csgL~Tt>Ab7kpD64>Xe)6Qpy6yJV^1LE z$x(kfVzF!_e^8YMJt@aM+dMezRseuvN#B^kP&nzsH~@={r8}1%UOgS3%Bs;dfqwO; zLjqQN4mRYtPd@U>o!FoGM_|8?32;FY83!v4lWX2eFhv9)077IYalnTg z0Enpd10-t6hc+~2D4vAqs;)`wIa{dg9{1Xlz2L|4WGm?Z0rJANHV?jcK1SjR4cv#+ z84@*25oQRl={0Ko(3$Subd{B0_LJ3Zy1prsX`ETDt5u?KIN_p5u6RVU)4Y`wD|FiU z_9d*3pY?h4MVke!(KChIOtid#+CCMwQ2lg2M?;Sn?HL@S_a`LKOl850S(@xk#KBP0 znu|sP&u&>^8u&8#v~2imlU%weL9nx7iFfRNKk?_gk4XhcRISAlM~*+#?iA@qyhk}- zW?=)82IE)nH=q33{)U>otIUl(L3)K@M~imGT)SjHCaB=}`cq%iGu0SG{O&TEyoCG! 
zfUYunj{Xs4S+Y9)pZ9lA3^oDbO;?iR5=~cR^%rn?Zx|uoR_ccRlGtwDpbh0FHWW5! z^9vh@0Vp5zRTh5Xj%?N79~7PZEKeO`&|9a&d|0?EIM5DcAU@pupM=PVIe+eToCkyK z-%3wei_9HXKzjZgnz$7=QdJiwYt_`UpHoj~wD2Log3iRv)t08Bl(o9+lAZ->lcoKp_@x_-!^YO|otvB!hPnz<4YdsLlhQwxFUH z9lrCX&IT{1UAerFmu0tXa>aJ~(+7@;~( zabw7lVjB{lINPGM1%0 zE`6Pf$}i8YJiPfv#`Kw_%;!*omLOtE;0(vDT=JZ!QZNbUZ1npIq*r5@$>;H)2y36@ z9kFOqVx#b#I}=+!CDrks>{YF903EjRG+#p=baeoKdQ9bP#zi&5E}T z$v?4>Yrr&$8Pz6{$#Bmgp1bg`E%h6?+?sGR>t}bn0s%JAC{?Ntr;jd8RM;NcjjSs5 z0aS0KRCERXL(shsvs@77nZeRb~3X1~TxOez*QoL=29@w`<}g@-<23xBgH0C%*a59 zo`mdyrO@k;xqp7?6+*oe$nyA$@dn(8E16OqChU6~Aw89$x;o8i-_9(M_UyOHrP+O` zc7cme>Ow_8s%$4S%?~ki7U)&NSDFW-l^$XDxp*5ab?}y?D^|(!`e>I%nlGtvfAi^# zv~j(43cchq$xglL0%;w$8D2k4!R)5YK}fc=(f+6Wff%2(zCSUB!nZ$qxO*NE33c$6LfcfU6) z0{yaQY(mLo{Sw|j!pf|$LqO|iwO-%)dBTBWkm*myO7!BE3RA-yOz1=#C1S#6m&E6+ zZ7#0X8?9YPvI(2QstQNbCvwza5YQt zQOVjwTsyV3-o|HP#?BiR5>za_4mHUX?4 z?_|DVg}F&TPy8V`W$+6BdjUA)!slaDP4Tl_!HFK0uryC!x_N!4womfJE%} zNv`Wu%@w0`=q>dk@cpf#kadm_V$CN%s?^FqO4CNT%VKzZi$V8^h1?fXcBoJ>%=jey zVdeApb3F^uZnDY&prhccjz5L6Hsu5lE?t}=YMvnHB*k)(R?!d0Pq_}p|n5^ zaP@bt(dj}D|3+kjHH|pa?-b#jXQ~e?Gn3LKx_Y4|nX1rY(%({Vm5ELnbf&3(30{5Y z)HYJ{kG@COpvv=COi~h=o@yAfnwgx-4u5(vp&KA-Pzcb|Rs zpWWYIfw}k2%$=D#bIyCtdCvfmxl?T8f?-B-&-gmYTwAGnWV9yI{#q_m9;FO(4! zdv7+jI!&Q7{QJt&H@n1HFLW{IB$T4Jo)*iTOH(VkXov3DbCUm4l|u5$IU z#E_C&#DdbLfCV4$vT((RO=}>8MnKgRSdOT=vj>*iE;ZKoKtdrwMlizK1hj65x z10%Z+^`gGbZ{B=fsE)GWQ8@QkVUt-J%j&~cSB!9wAp(xLf|gU72zq@9Ct{7xzy+;?z**VG96MV|gTDTmPY6~=0l|qiHLi`lY6^HA{;_#QB{`mgpO9-%1gn(dR zKpcAr0}iP`p&phXIb&bf52Zy(ajr%e&*F8+0f9PnbT#}@?`xm86isocAw(tr!H)$c_INjYyh!htkl_dQtf@zW`jvPrKMSyDiM(0KNbLW52v0@y5fOFn_vqf70+wSKLz!x5;;I4}RktPC zF{gT&-)*jikZ*zyg64*uKcSuL@B^b|rs(n}Q}i{nJa|P%fQ5tm#eP{40GftpBP$-+ zPA!_ONs^-kx+69s?ktFN4-P4#E)Ri(jhqUQO*0pl4T@CNTN|z1eL+~K=%an9q)6QvVlW1Zr_6)!!t$5K0+cm%qjrKSI#h28Y z&BYdCuy2~ig7o3iyOB83q`oC@c;F9&UIT1bmgPJCr^k@aOzlm0$!3DEIN*0!du7$V zkCuaZGle)+yEu2!Bz;^YdPvY`r>zq3t$`f%qUe@Xkka@I{M#qdV~g8T>ZnR(&i$O7 zCo#M&u1(X!vjV7V); zMjo9Y-V+eto5;_^V$aoUk;8($tqf)0NT+B&=}#`7h_+A=&Nn6NvCx-rEISOl?sfmt zYRKzz&`h~5koC|Sn^#AGd?Mvk}@d(l8 zJ60w){+*0n`r5lek`vp30^%ubfqcqI2+7NHmbqDdXkXvSv-DQ?mQc8a&%ia7UcDp( z5kssecm)bxwC}74-NwG+B}uiqY@a8nESFgoydeX+2ur+}yqgF-G_@DT{U+*&)3y}xZsY{ea)^ruG z4P&HD-56jI-$=)&Nv?_?PIw%pxoEj$jjCtA`1N+=O5jp!)EXSrc(C7lFjzJG>}cjY zp+aHgJ9ytl8Ow%T!s(mtgs_&v!?CigP+oOuv#U?bp+ry~2>yla15L3<(73C=nXTW1 zX;feIP>Wt%DbgKhwix3Is_g?2^d~{FEvLtpadEFtQ7<&jBKe<^qp@?SLZkAOuBcx> z?bYV-2jSM~ng5z+=-)7Y&icw$u}>c9>KHyesEFbSF&gOSCm!=%vIGd(7kuN0)QPIX z{Pfr=Y5EpBV#uhzR5~*QV=4oTX-8yNsZYG@+DVWyL+q!{e{FpdAVz8?l?>e1bnm3l z`yZtn^-`XmWAgs+HQd?efSB0u&sw%=VI+YnYQDRI9dsiueB2- zoSgFR5ZDh#+sZQbdk_OKXh~_l&0FXF{5yVAD@-?w4UrzCeN;{$ z(s~Zkj1!i+tq$KwR@aLodT#xc^$g=f*%yd4mJiFlg8{-^8;$``Zt1D~X&dQky+t%I ziYh&d5*Q3GX(<`uu=dgm$Y!ZYbgwQTio!@&T|82!Lh&ft)J|V7CTsHc*>K@h=l0c| zX`|zW|MiP8jw*bGduY(?P$RjJgXYC``(mFklVsAn^`W%2p9OLDa7gh$o+(VJaki(CFC!g+Co%I! 
z?CP2L$ssy=*E*13ZYN&rr*u%=^Pdt5_^d--@f;r72BxgUakbL#HQ-qsR95`^Hs^l; zm2_3Iof;tGus8As%uGy1fEX0b{~vilskFjA)Nu(09HNgu#vKgA((c`@m$@jyCC%f z9^!Tp7Cmdq_Oyc@)i)DNET$-VdHMJJ9C&v^9sRy=p%#GTV`-&He`@+nml5D-3;8FG zr5+H&>ZuF!GrPuI*u~t{(|c7phSbZJQtiCrK$Nn0HJcia1 zK%iL?R&^A=gXJF?-~+^!92v$DZDbNFn*^r)bY&MK@2+lahSZN~9e8f_FC21MWF+?o z7&bHIreraMQ(P+&9e`^BjPWxU62ZPKM0UP8a{U8b;6LsYqTqozp$Dgb@Z2JPdNMP6 zT)Ix%=Cl)SFggBpVXM2R+Hy+5z_eVAxi!P+Yvb%ZCrSZK-Wq~-* z;a4gVmBv_}Mj}1RQh@_FV6R)Z7-2UQTsqw@8@qDArfnSEeZ8d_Tr7@%KI4N61a$j# zKU_xhLusBE+Pfg)?0*yqOi?g^Qj|Kf@hJZST+5z@@CrwE|J6>EfiW%%X}Vxa7Z}2L zXi6^ln1cW-=KNYA{ic&o!{f01oUIVPuM7J)au?4J;`9yf=i)I28(-XkIq}HqUjfdA zhSPEL6yvDTYvwMFaiQeG@-Ch4Byq*eB}{(?H8HTGt``t>^;E5W&#cEVR-mvXb7@nP z&nA$O=_ji6NxR4gh5)wXHrM%j&=tQr@v znVA`jAHlL?@~ur*f7m|vOFwYA{uu77=<#TgyU8z`!XK|bqO3W6*%g-4K4e*4E$~9F*?&X zg|SuiwB9!n9NK}bl@W1pMO!Izxw7a>h-t;+3*2T)^|tudlp@^sYk-UAZDTA09=&VW zZk7rADhFQ@CSMan2>|KycZb8D$z~!wA4&6n`leNu@E%3le`DCm{DuZ^w%??o02{#y z!V5=_@%mm-%JG`pMAmD6A!B#wc$Mi!_xJK+d2zKWs^Jfo2CV_jF#3em7o8&YD6&iJ zT%d^4IGMl1{QX%^`@2(%o;g~PJjDRg8Hh^(Vy6!IM(ZB7+TpGt`URI`e{vkw(lidd z$<$!eF0<3XVu8)N^J9r-wulI!>A#nuLke>r`m->xo1_H$c3u2ul}tl7WNZk_trx0g<42*EG%!Y& zcR#)}pisG@n6Ws0lcaDkn=V1Coe{{&#_ENZR^m6QR5mG;`O4&UX616e(bvBU?@~fk zD~~!@+V$aq*TqDLK3#Ln4{?$)k`pd55M$H=7)9c(&SAMy>N@&Odc7d_k-YGnb>&qA zuvxrxH{7?G;w!v=$b2pLz2#Mh0H>Vc|5TqQp#SOP%6Th`uTYYRgneg&HSY@XR%vV; zFM} zxTY=40&_T$L-K7(VqyEdGZ~I|I08q|;BF5IQHX~Jw`HcpG9yr~&2l+5Zb`yH#_!R8 ziw#dsx6g57-@tF?PIQnZRa>2kXFFWHK}-mEAZA?XcMJEK<+{=|lD!k4J_ZPy!mlTz z-N&8tU%=^pisMeA?=sy_001)X_!Jkn^x95e`3*30ls$^k=cxY%H!>BavxjK6uQZ(F zi9*yG61z!)2c3?d_%z(TwP43S ze|Nian?QNUyU%i=VMWOE$;#&eUJDj~+T%^`%X{5T{10zS2;E=+pNKXIdQ?)?&n8HF zEURs$jipfuyk3~N^)bktZ_q>_a;LpJ0Us;5e;8p+5NjC^@0uF8w!v|VdS|Jhw;{`< z*r`SKj?ewSP|uHI#Cl)nutFVaS_*Tke1$!C&~L>8W$%(XF)p{q@`lNdkc6xgs8)M# zp!i8Te>2~fO9)z69%D>sugZ_?sPD66w7uUFF@C%c1fG}eU6v&PahF4Yc9&K2tr!Lv z2pnUtCsOZ_y*44^1B4ZvSY%}(`~sEdo z>)p>2oNve^{|_>zi`Poe0|8va-~j-SX-Wq*y8H~_1CFPMQL=~|{q81gH9BV)EpQY8 z;$~SYnUZO@i`h#A`=Fk6p)7*?R-~noe(%j$(sVyrY`l)#&$RX1=#R9NM?G{2gK7c# znO0da;K=TXjg*w;2nqFS#(e7BM0ZGgGPMg=3h)zXK#b+snaI+&|E1fsKGIrqdli?P zlG3sGtiqq=%ZJjHaKH<%hYl)%U+c(bbj`4-OxfaQk|NK}lmw`JTB|+sQRQY64J>zB zO8|vnm;bFR4}|ZU=8ybct5*-SFXg58qHO5D^nO_hjT75NJH7F{8S1VBbM}5LUk!5} zX9yXA^nZ(NBpnSCESm$etsuBw%} z(nd@$Ad9?0rp|*_6LD`QCSBAEprp+SVyv0jOxE_L5@iA?*DM~1G&VeM@cW+j$8AmY znJtScz8{QyQzvGkOF#3bd9SB`m*qMtvj-0$3exJwB%ETFkr7#ru#uei{ZjuMQQ?bY zkl*L>>th z{w^w&6JeI#upt&XOSV1eX7K3dJ)OAyfA>?GU3}_Ex8NZv1V-Z& zNxkgY@FWgu5`?pqF=Gj&UU`9^H1cqa3xgA+pR3{VgfKYXjNwnOSk$XO<&So><~2rt z3Sh9mwh$oPrqrq+|U|vD7_&EOMi;Ve`Q5W9<_1S-! 
z5rAXP0#cHMlyJiCT1dm=EYC{dXy)f}NZB+|wfdJft(M}fs}`Z|3)^*Gz7Joy=cGK&=;=gj|&zD*b0npb=D z@eW2O4(t;TtVqK30}qzqH=ECyeKTU|6)>~WiM?!wfcw*&X&0ecSVd{Rw-P|)P$Jb{ z#uuc1AiKpcKv%n0P0a-4H@K#v1KfmOx5O+*y53VJ393&|`Tyhtb7HhO8jSaR-IHB; zoQqpd%HHHPW`c=ii2~B26&Hks8j(^2M3S*Ae-02hJ{F(w22t?B0B>>(Qm{Qeu|Z9d zg_pc58zWCn8~A-Q@VQm;`lXcspjs2u;@eT%jg)O zJ-?6At^cdJC!xH(a((`#*h+UqOqxb1@wAk^(e2UClB+7*t2$P;K8}rvdCV*rTI&Df+ugG1NwJpy z2J=r^`Q`{^W6!jO(HvlTW@?fH*}bTGKU1z}?*Bl`b~dao>L@`MR&ysF(B680D5zt> z=ibL%(f{S-bg|)133HY0(|{Z>RSe9pnwgTd!U)Ac z9mZvw)^sdM;p${vC0xA@=DfzpsFrJ>UI4Xaac>iuYGzt&$iVg;4j9M0Cv?2KllUZ6 zR*q&4b@yo+^~od4Skw~SAJl?nrqt*;W%!swL{!EN-w%l;#s!B*`%{a4+Am3m}*E8h2>7B z6ovD)thzg?cfSW(T?Yaf4T||BV`33CbD^UdR7~%WC?RD^m$LQUpfAUXaC#PZTCYJ# z62~6$x{1?>cXpvy#5s?PuGhO1>Oo0EkX(ll1%(f`k=2A82f(7`lm($26_FMn_HXgV zu0B#>g2Bi5J=Zql!H$u%DxZ&4lYxkSfCMA&7OH@tSp8WLa8PBj(v5RKP+XgyMyJQ} zcSDdG3*oPocEa`&DPqL3alj%LmV;we4eO*|uiVmtZfVSYz_Ldo~^OfE2_$ySnZ z^Zzu=ax*t)sVh8c@59pEdj;8R%!Q^T(^!!BGfi2H)8xY5D35Pwwv94-8Ljz`#DS#r z?%EQ>{Q%+0x7`wj747eUL5F3>av699XI2#`$#1rD_QA1g08!jEr$_(F>I+B$Ly=k% z1XfDSDHahA7=|{jw1dl3n}F`T5&@OGe&3UgTJJ1ZCaLP|wS31nsbmgN#MB^0Og0~_ z-3<2%c?}x;S1(=8v}e6t%wvtcjE&D@KK93IKATcAUwD0^;Spu&lg09hMS;d0xYu`@ z77Zt_urg>dsT4tRE#}t474H`o#ZO~gHy;sy2W(622t5OQ+6T#2S$TE)6yab!bKs&z zTM=;PCjj&vJ25MB&;Q~10P0|dnSA@Qv$6>o$Vd|Fwv2L1W&Mu`5^cRCe=;mA648}( z2uxNuQq?VA^>zL&x}Q467JGhu#2t6p-UT^y?`o%fl%7ge1vo|Gk4_y|j)3XCJa?!B zICdQ9TDNWhVL56CZk|jtEExpfyu& z82E-P=>Un)xR$I<1H$5dO&ZY`fc4Bt0@f5OL#wmVC>0=GxvHH0>JcrB^Wdn=QmU9Z z?i6A9(yo^l0r+@$jwzYU=ka)#WB~zTanX-ctpowSiVAhIyLA!qNmYoDI8KR#-@OJ5lvnvL(wKamI=X>$Y#R7=JWf0Vn=3%yjALS>8NjK>jszmocw zMb7GNRM=WF-W{zYh@i>2(m_GTjMJ}Sd`giV(DEy?#Cn3RbduZapUTeoNG6)J>IS&*J|jc~U!s$sqo7Kd*LwmyLU25D_!%l@7xpxYdI*F}UP0y0z5 zz*R|TcZINnD9tRAjuVL|N?dsTMhoO(Y;T=H`faMrDIuoZf!4F}A^JC+bR!hI8-EE( zrH^0?pMH(zuYw=CjFKa81}P}4IX^Vll0(I2M1&rcvO@Rof2r|M#0`27v)2)INS6bo z@$O!`wrF{aG=7x1<&xe5vWT&R22(N`J!>+FGrX>4bA>*^T(seF8jn2{d$y=ji2;cq z(|F3+l^}n&O%uCXH$u8F-Ufmn^YGX0t5k7wxtZ5~5bFjBB~ASoPu=s-73pBh;$&AF zNMS)n|A>b0Uy>HgvQaV)mYMZgAm<)M#)&0Z(;Ftl+|D(m6BJY#jP4tHG>e&?eLaKz zIIm`pX3LqshqUBE{z!Uy5o4U5@Vjq_Pr93d0p_b7{!U^)E=mBI64My873Ev#S3*J) z8NA}Pay9ZRUh#*xG+j1U&DqI`WFmstIR95Qv5sRFEv0Md#Nd<{DHK~m`D$Hq8}ZFx zW)Vj~`I8U)EjjBuLyI&slja}qGmMTrPoxO=%%r{I)0$1gu!ySQPnkEx49+gM`kySt z{&xS+cuyc7&*|&n-Fv%Z`@+BC=EfDpNpyhG+L<@OR78^Hm1VlOsu6)qNlaDH&kebXVjcsYwR- z>*uc}>4&Bjtb7r-H|CmVnQtB1>zPV5U6l4=wp|g--NV6#AY78HB@u}n-wb{_qiB;2 z-L$UeL)fSJxYW8}M_MQE{Pp!HKYC-~o3<3$sm?os_&;@z{OOSyyxk+%X1mwY-<;0j zTvHLQ#Bq7SXkn-`W_ghTw7z+1(Gt)_mGb`P*h89(g)mitWjRF%9@h}QBWm}t0KT`m z@*|pXlABQL(8O&qj(f7!^ac?U(|J6Pc*;vcdX~PWPRXu|@o|R^i@hC$ZP<227G&K1 zD5`V_N({n19YaV@e~?=ICNg&jR^lGAa?eZ4aKG8dk@rO*YEr>l4BSMIgZ$wfth=*s z^%2cM@i9s)WxXRwZme6$yip-9XVB*zbbFV#)2BJ*CB)=OE;r(p9XrxwRc>@@FiPp9 z0(h*&5u*Z?Mh1YPUQJW48+CQk`p>h$Xqs?uff3jTVhsrW#+=m$q6f2m5=bpJ36NL1 zYqA!TCQwbmdVrv>T&uv*-A#J%9cLy&_^|KemSrI0C#Tdk{vbPu zWQA}-@db6Iw z>714h&W;ir6sgR?jD6DiQOJ|>4SnjK5S3_$-zYYiBqI9u(S<0Kh&&6lR}PC8TXh#H zG9fIS^L#&B+yUD-&n^=#W+<_RqzB}*;C{Ej*}7>)PVNJ*0uw+CMae+@OFli})KA@8 zZze?)C2Qq9Kwb$J$fdaA?GL?tqx(hsST?08XmoinQax(6oH<1EJL;2)sdN+?o1y(2 z?Vu25$TP^xd_&Lhn3G>xJ=~(jFSQLn% zv@%bSXnkWEKX#qSOW)>JE1Qy&LXq50$DRohSu6e9!8XADAa=dW9huv+RTSB#d-!#ct?N1+g@zt-60 zc6d$MZxvd5ZEj1P{67(i?{xbV8gJ{5;_irwIH7S$}d)#{FZL{H-_ zyZj6BWllF>f8PPQT$qg>9C=HsDwuO*IR#q&PKS_weLa|vqpNtL?YBsRO!=-Sw5Y5S ziH_KA$ACZnv?y>*byQz6TWmzc?TQsiT*9A*d|l5+5?l9JmpU9T7XwWzbZyO4mwYz% z2U2+`Efo?#Qzkw+)3y7tJjd@O9h#L|D4*~Uv>NgZt(~_7JmGI0pMuvTH4;??@5`pj zux{vITx5X#P!>V&ty#v$+ 
z+^fwi?!DL*$W9Bkra3-1BmZ^NiK5B0@#P`qr`v0oKEq`znOhrX_=GObLJ^pV5|0=j|aUbIUCA3+|yXygtG;4zCAX3Tw2>h${X1``CCOnpk(Uovtbd(WD6wk@v_ZM_L z)r!?^I_{>k05Y%kicI$F7I*Hi_QYByTm@qiI368^F8}?lu1|vAfWlY3>~ZK3IM2&~ z+1a&AzwLsw&J7LW#%}Ub+^=9f3tepBP&z*|oPw*bbp$Uj_K$U%$&=^2&8W{#PtyjX zqv1I$>XQ39`~YiJ1(1Dsn>q+fPf!1uA1Ak&SzHFXx;Rb#{b#U7EuF6oDLh+|uiGT% zQ=0B}zOaHetb(Yjs>`rPq$rb5JvjMpm7x39H2{%CcNJ2i^HyVn2;mZo#?lh2c`HD| zQC9Is`?cwZxryny@0nqZ+#8p+EF0`=|G6m|P5ivQZmvHa`Zta!KMrj|wIWicFUZW` zw@X6#tz+~cNY?riu(BFEA}2T-{<<1xHne>My0M@q*D+zI5GLj( zLvyQ_jmRmql_SatuSaW^d(85re!iL&@3(BgprYT#uE#O#Ir9pnS`oj%^yDg3u=yE< zJhR#-`&YSS8fsZP_a;YlKb4-xJXP$&eOhoI;I`Or@B;jz6YA*kH=2MnE&7a5zt!#p z2o8JoBSFl;3Javo1xQ8rj#1FS(-aypPmU9L#h2i*I#=i%2FR|+bza(tWr`n6Gsl6k z=Whhc!JH#m6808`5!pKzTV=Kje}dgAr_Mpn|( z5AUJ9EV{B%o)+&~oa3fHIKJr?I4Sr-PifQZLl2sKP#lJN!N$3fMg8I2d61O5Z-^6A z5T+c1-XK^Hq(jQ7x_hWVIL(uQBSzK_(Ty2Rn$ITJQc3^f4?+Z({YQP2??%f z^Lw8ib|K4M@kzJ_n(_W@`BLiWwb|DwSEvs(5qpZy&HmVl3!QU#7NouO`d!I+e+F++ zPML5)i;fyRpwiC%im&PFeNaN^AuJ#~~Px6d!5QU=5L&?bQ!CAa$O)tt#o@VW%bFy%w|DR10p6C!$? z1;E3Dhn(=4B%4M>NZzG%>eCc{1yoDjTFKC>+X!h*FrQZFTE+OohiXoX+p$Hgoinz$ z7lhpjN1?kyPo_zFzC=?v|IjVgk7f=TzUfmaCbHm}>d z9jh#Xw&qJfVOJuHzWF~Y)_-%OccsR&Zt??^h;NX&SeAInGlLAU=n{oMD|LaIQmn(`#H!h_h#vCj_XUb%-ssCNH% z%6A;Ycci4Nugh4Gk$)!E6Bt?ky@eI|j)TfNUw)z;I2hmXP<4HFsxEwOFlZ|^-1XVu z!#gV7yRqoOo}UK)snWXVT%0KmxMSwn9%##=Wk%$d7^@jQoXD)h&X@Q7kKa3c3bTzJ zd$LP=hr5ZVrjNzp5%1o=TnZQxu6sZ>{ptROz6ioeNz!}xaP{hO)Ftl&ud-LduXVV& zHo&FV3L9sO_IfEa8hv`dl=*Y>0=TAhazUSYvRS55QZA!-l_0@-iH0EhwAWj@x5=cs z?l~QByc59tl5F^oAnBlr_Qom80yr)+D>K}fq4j4phAkXh0rWQ}!3JW-?#J}RC>deE zU9$R7Iws|NVE4$dX4}a_%CJKOZf*t0qDoFD;`G6uqw#O0s`I;75!p=AWaZLlTlp5a zpb17G9T6A^qCe!_=PPdKNE?{>6@XyAjM>&0em}r2>*zRH0>Wz%moXOgEIbACuhf~2 zGK64!uXYZ+PwMEUJLp5aVabok zN!OTbhSYg-ASaLp{U2>US7R*xP_`JC+7BNhdkR+2>#x?gd??K#KGl+l23+2FJ48@0 z$F;nM+J|V01+9fy81B;f|Mk&GmafR3VzY`zTBV^q zd=B&>J?JXB#8QRgLq+CUU9uVcP%X`ARBu(K+zrD?*H_zD`!;BXE-NWy)e^h2-pd8t zq?tx3eri`5|1{>D^tq>zGA67bgUDEO?C)n8#ZI&5%(qf9zqAw<{%BgSex)&S6Z!Dx z{5Zhr<-ASYllypJaPVy%HLqUVW#&VE^vAxUaP=Civ7_YpV7Ss z!!P$MDcU)M6sHCqFcC-L)vEh zeXZ_@OSp>SwuNlP6L9nlXtW7vo8dEp z;=FWWJm^!toW#}N-vFJ`QfsSAj^kU+zr?JVE9-~N{>pw|aoBKt2GdW?w=kolbEplZ zmY_oaNS*!P9oB$e(lnbVBGS+Jz}C#hR-!EE8ZSP)@>&a6);qVvVDw-bua;35tIk>* zY-80aEcRX`JwmE16YOlgK^QTJdRcDudmt^E--Cxj5RvmlvA%2~*qKPI_^t3mP^<2{ zPa?9;5=o{6^{WN&LB`Pe+_L2Szmk`>j_Y=TbjG$VJi8{anVVzJ)Z>N_PghUQ2mjWw z$E5Br3#gei;^OkO&s+Kb?bp^R27ytCDlWYfQ~gBZsH4||{)S5$9ZA-bdolgKOK2Y;H=7eZigcEBGbBiObHi}T zS_&z+#B1UpTx_a!g#BhEF(}9l!@2$7YK1?Z1A%^8k~6N7%F>F${0=*bGc;=GL4lny z>qq9#5w!4Jo?$Vh8XZ?FdLs*%dZ=^-92aza+yjnvENxcsmP!}?Bxnhwqog|O&(?R! zAYQ}mRsZf1#9?f)q_$$Tk38uB32h&P zk(7Tdods&8wTb(iCzVUIW-GYD{`8-V<j6deU|9!<4~OzMy#$laBJJFmiT>S{?XK9=+EaTdX!uu!LFNS zm|uu@8>FNgY013e;LOuK!k>hlotS0;LKi07?VaVTNXmbDnTj@NayjH%T8)n?Y!
  • )O2+=%1>VBJ+eB~I6KaF7&jGmu8+h3T zqz3n(Pd`6bd25E9t#PDO>^S(y*Om%DUQer~_qcL^Mn8O>e-ochiVll9Ud`XlLP#J| zv+m1@iHS9xkNuh7GvO+EMd&?!w`1$KhP1A#Mqe#vsECkM%Fw0veu#};ndy(&;mdeB zadp^XfWvq8SFXR_9>!3c`nJ`+D+52pU^oK`3rl_>z&cT`E%>91!TTRRSKdPjn6&Mt zZFi)T=8)AVC%e_e^b0HvcoLLBzh-F6%E?=^u5?@hh|vR~S4=@kp9MC)Sr??cn!@sHS$MY;>1~#8S))}+7w&pe8E69Lecp)~N9G}ym(i=8z(F}m|0*qp)>R|* zJi@2S4j_oBJCs(@qDB6$)Q%K~PrHWI$4r+r7z4Ny+y`~!`lvH^<(Ji`IP?x;a?P4F z-=^VLWPDUaL?TCTFE z?d;Ct$n#B%c52)0KcDP>e>KE)cPs4L?r@n_cBYmYR=u9}KA5*gmmfk35mzo@J$%ml zbFCVKAFnFwcZ!NT0pPpE8IPo%XZji4gW+5JJ)}WyCBA1rZRHgsPeMm>_3V1k<#EE{ zxzgLOSilOYZI}ALkE6uGI8}k~bij@RS+zPfG4Xju0YcyYffX?lXT*jv}B~IOsIoy_2uCQ#g1##EWd^k;{W75{Wuj zpPTkG5k+_N7j#Vt6ZVtD2W^9dF|6f^_=8af2ZtcWiFb!kV}+~+ftQT&#_8=o0XLpB3hkR-M5P^!NU>=kOLS`S zjfU3q;8mGliv$xHP#__~ssz7d*Ry6s$Kg5xWFskYd}ejLwi0c(x{ZpD()kKx#3sJC zkqrC!?Ds^d~fVF-I*k3zUi2!waKYz`Eh3r>Ku(5$F0zaN{`soxxzs;J` zOfML5=1qj%F{`*o);P8++}otTPx&}>q+xLSH^Ndoj~0*~4VQ|}A%mHd@YpcmJxC0zfyS;q}G`CuC4SG(T9 zVPbbH^_#$y+p#=#q;ARL>v9>u**U{vqDZ%2a|y|-V48C4G=BxZB{Ng#^YA|AsKPngvcy{RWrdY+~k>`rYg66_&4j<`KBq;`YS4O%zhlzq>;=I?t$ zZK8g+to{-D{we(Z3~Bh4#Pa}xGe6&QzozW%8-$o2U%4BMt|5Twra(D9WE=JAxrVlm z@#jlkWi5W>k9D^d=m}l3B7ir2%u_*;#cByMWl9G8L@DnOx8^OYXi01DgBV4yDZ!S4 zPZsbK7L+{co(Z>EJYrca-svCP5i@fdx!`LN)%+1Mbo~Xx3^KQUtDjNm`Qr31E(tTF zDx_;aJ;C-l>{Q!Z%vOJYLP7P=s~vactpQ+YXmB+&hxz~nLfVdebjAa6Ea&1lu|lpN z?VO!c7{q<{$vrk(EJTtcEQd0MR(e|kyWqGZQGSmNwTggEo;Q5BR^D1?iUg^P!aGZw z2@(Di!8bCGeVTuIE(Q%h-;BCEXw)CfI?qfOSeF{BE1NU(@2j*vTWDh&?G9RfClX_< zsy^(md8u4j%iy;L3b>-1EUP<<1P9O1V85L#g_6$B+*3{C0ae)yeXzG9J(bx3^7-{B83rAv~ z~Y`BqU|V&KCXiED$dCL3fO4;y_Td-Mi!Yt;b8dKL&nX7U}nn6 zK$vMr` zBz>?zj!V^!$HM;4?JYYZkpm^t=F#JLMqwrOWR1P_H%30(M;fR zVyN+8nF()5M1_y~2N5+b5LcQ@dfXwR38;7rtJK9nkzGqRh~G-d?x2#%*y(a94G)g!q7EH^*Hz&)JmV%4xX)2L2O5eUgPdd6i zdW)Wv7`}z?F{W~HDK2MyAZ=@gUG9w>Dk_@<%=Sy-c*v)HfCM-|_t5I`6oH@DuR3~# ztm0>jj|^_7TR&H!Ae?qCr#*4Dt8OeQ4(VM2EPa64G(L(#UBa-bab@AiJK+F+sRA3z z%*^Vuoh{`62BOJO=$Nf!@IpPOg3j~B7cYn?yj5eZq-r>cS`oMzPtSh$5Re*Pc`x=B`WyjojZEh^@q&a!60aqIrA;60iPAK`)F*Au zm9Sm<6>M>Nw!e6KcqnRStB=`th0FcuwfMRB!G5)mJ@-KtK+m6@?#d3+h#+)s+mOvm z<%TO}!%q4>mia+738j!4Qs~aI2c|T7{fO50Nmy)R)Qfub`ZJj-THg$`9%VyNF@DfE zDYQiUBvwlUH45U#2flqWL=5^O`#@n^bCs0m=+NX^mUsD(eyIK3>-U|EvXSlo$xQUwmHa+*!bu5a&e0-`$As%eDe9UaAR+BTdf()W3&#pKC%2ez&VcwgZC zL8%99GNghx2ZM+lLu6Wr=h?lO>IFnre|bIG8phu}6r)xd($BevtIWCYc6H>JJ{-aC zpFon4N>6egwH?dWS5UcxWKAW zZHWZU056q7I~eli@gxSl0szyTR6NEU-iM3MuWQ>6O9~3)=K~I#KVwh)e<~6TI@!(r zR%i}H6Xu8>msxBM=P)1&TUt!B0}g(!csw7-d*xliW!hxm@-=WsXew&CBQiD;IfY$p zMNkluJR)*%+}?%zbOo&Bjz}=Gz073UEi-L?{b=uPpy0EVB_mcCR-uoz{UkXjW;H-n zFuW`8=2PY%6?>Iv4m#-y=Fe&vh;^sFs^C}vgk=)Xx>JMmB`0W5It+c}YGQ(oQhl$| zE26>!0&r#It71V!c9MxUof0P~k{cnFASA3C=4xHUCZ@{{<=%g$*mSz=blf>du^ts9 z65390lMwob7s|un!0;6BFQ4FzTW32Z!ov~GpiD1Kgy&gnaTur3$n(O|d`H zQKOINcrzizm`UaAyyu^e25Di;sb2vzn-UDTHI%WnP?Mdi5P2mc_)eZ^k^T?X-aH=4 z_wV~p6fKAnvP_Zey9k*{M4Rk;_7GWy^dtvs{F>{bEI19mlGjwH{~X&-ZXt! 
zzF{t;T2+;3{c7T++;%}c#x=m2L&pY+`=+i=C{b;WJU#S|sgnC-J|3Iqo%I%*roX;$ z(tYcYy6ll;;%g05+fDQCXR2>J?l_@tEA`+jYP-{*lMZ^5sv^$w@uzE=gWlJ6(??gV zY|hHa1PAPGZ(C_&XL*YrJiP&3D2|~;{)&>*Tys~BiH9U7U|G)~GnL}a^pnxN)WceD z?`O{Ws{8 zL)3~gPC)2;AXc{A_>|KobFi(w$-RaqiVlN@-5K`LwiA!|7e1E<=n!B?vrfgg-tLBbM}Oik&0nndMSkXRFrp1qOb$r_ z6X7AvMy2q_1=ZIps^QKv>dA6l-610&GZW4Rv2kWGT4q~U)JyHI^V5+m?Wc@J@8)=q zubI~0hfQUVzqu=YQp|r!ye^Ju(0cdi?xWr9^nQ*w+lOx9;K@GWA86yB9TFg!2@sZ# zE#MOKi<}Y8oJlr+P!CsLHok(=1jkEomJ>0mbdFjgKsqmXt=Z<6n_f9^#T^H%zIOIOoK*i3er^xA&g6a0Ik5s#6hzLS3XR6%1aFczH-#^1; zF8bPi@m{oIpnFsM!}F1(ZV)yzbTKjV-mMFH9?(|?>dDewKDT0nBlE=|VJ=sj;h#Rk z6S3W$Re$%Yv0Jy_VJXzw=6}JrI7hY60r#{)sa@=nJbv#$GMJ}aX;T?2&aT%~z{jdQ zaSHi|6a{vhHe9{q zKtJNeqEe9TUv7sXh#eyRRmJKK%f%)b-H4`@x>=?pe>;_wNi3 z8sj5jMRHuzuri3>z%2Y5Bh6GzZmyxnvjhgB$qX~&#tcqLS047~6uqty^zz1cIC(uk zn^;?W%_EX#ETaN0G!{r8jWXjBi?uuV&nEQ1)TeSuqqTMiBmdfMd{ z<#Gil)pmo{A|+1=T#J3@O!|E;-sY^X0HV|q1+_f}o4$7Cl|F0{)$1I#UG%jDUvdaR zC+iN+^~E(wTtm5T0(~=FK>ITzBQ6$Cz`%^(67FyZ*$jTI z`)IJZc~@rQ>fK07g1vg6i@k>{oICQ~s208=FP^^PkC@WE!Wv@))Nbt_apH!8iFkZ@ zr9tK#bM_zHLU_nEcc!^$>BWqXmd$X{7Wks^Bca7Q{l1@Y9j)*LZ05sXOG`+=8ESRf zqP{f)x{t?^bvt+^f~Mf*Hvg^%qA2rTI44-O`=#vshb#v|W;LSR@Ftayu7+0~-9csE zhYg<{St`VShvuJlx{Mx2^O5j3`zKowcML^3xK*2|5kqOcC=(d%E%GfxBOeaD~E5AB;iW4;|0EeUq$5oskZTMSXA+;aI5pKb#%X4!!gV$DN1sJ8pKOuCpHnnv@h&>qX&=hLY0yWZtl9pF*r$9+fuQR*9^vC6XCLi#jd$%ti|H%8b| zD7e_aV(|Ca*KV$VDdGN756_dptyB=JzMUqIU#Uze>Se=i*$lo^uBBV9{annJLPuf2 z^jMF=a#LT?5|$HAKT~^RA(w|K*EQ24@*ij@Cf;=GUNW1lz z?f6*o^WzaA^QXI1F8*q|VZq_K96}TLwym&60>fLQBA2H3kwZrgb(nF}$HOdhPM&8} zaj;1Swtl7I{Rfwid8Zzh1-@Gn)ctc1`;p+1HuwwqsVO39eyD7oclhs1$*h2v{`-0Y zCK>;26FX^+!jYacd{g#5|)u#vEk}R%dWHwB|>S zW;prrLJ1fs-Bq&082FCgFAFmGVbe8ZtJny2Lo0Ded;Nz|Rvep(DUS#s7B6hx3@~+b zV79SizbG|@84?KWFxT#w%kf$5MUDI+B*^uqmL}f02~YxBGbcU2S#xK*8<}`gq}!8K zU*_BWHpC;!-7~aruRWQj;8q?Cr@=vlbV4NMoU`0+?S@CXUF++J2`E%%SxHtzOr*4Zz& z-rsfmw!L68*t}%NB*Vp&RM$bT{eki9T}j+ljTdU7V5~djY7E%#KX0!+rtJ;ii8(0? 
zNZGdSAHSR!+0)r7&AWIgyBM5%|IDd$g3IwYtM%Ll3YCP6e*#^@1V|Pa&!snu*Gc!7 zXCgwt)TT>~8K>s|dz9}orB|o9d9~7OE|_gFXhy!S6}vkbZm>^T43^L9bGVY3a)VK;S|{fC%qqojkEEonm#IyAsulapJdofM+8m(qnT?N*L`-4 zR+?$4UYGS=UelP7S?LO#)RJ9TXf0S>JcO;c_v}Y^Rux;%(qD_S5#YtBOJGo(KZ&{h zS)l+Rb9rmM*r8vnc3d(qx)4Sv6~2tyl@0px%mNA{Th&P`R~mE|b^kKU=}RAQITQd?J}2ce2Un zRUNI^T;ntlQ?nwJz0-WoJ19(~L~1 zW7irh&daTxkO0FF6QHlJc+Fk~W$=TeVx5}*g5*ol#^wEjq^ERKJGa6Z+|S|)0n08b zjA8o@IRN?UEi)5hmdZ$}iC^jisNR2|`8^9SEq9iGZ?g|)ZjWv&0PG%f6Jg?5n8NU3 z-iisJ>cjY06aw_~Ze%Eb?tKCxbBUS%1k2k(+u!hhp~E>!Jfu3N`77WEl4#6-+4?VO z0O%5aNx%E_FgT|2Q78Qq<&>$`o^PKMDE0mm^RJFuTi|5i{!QcIp1|Ymazb{;6?Ngd zH1~)Ff!+P*f|E`iokrEl)o=R2pzA7Y=BTG&&=VrIFXu<*MO1BLVEELg{RSSSo^($W ze?>Im87i3}0JUSFIC#Irb!mF+yIlM{Q~f`*z0Ni0uUH=6*g(v{{K=*C_V4PrJQXNB z!Y1%b^+2u;T!W_=67w@sM)hlb%){?80)x9syvmLv42n7bymma}gc4i@F5$W!JsWJx z(Rc^Id`8@y4&P#O{I6r4U+3l)xIae22#Y*rtIu@!TK;2?u>RikyTloKIc*A%VWtt< zug2Tt=_l*vGc1XO2vIiPFc4DBsgt;fzo}>`sm0N5B430$!&N6iBb6`xxc6_&3gGU< zuDp$uR2OnSI5>|jVQ8Agsvs3XzO%#0VDVPWd63_J&HkQAp<$~+3se3@QrhglnRfvO zSW;!&U&cLMU&+TqxRKFj&n{yvwIVEt6&`ZpAKu|YHM{(1ug4|GDz}_=~vgrQ- zu`lL$DkzW1_`hcCaX6UuwPyHYws+7yi+?^3+PNGJ+e^B{bHyTjC(v32p_RplHey2OFFOrr)*(Acpj!%A7XJ6c4 z;s|Ar1*0!3P#R=iM6Ank-FBZjw6!eRJPKo~Vlbv*fTOwZ%1y%qY~%;SL&nDBOhh+j z484Px-w*HQ>^f~8J+mfUUb1;@3B%8k47lr_*bLKi4p)Xf#*1bCIw*LsUIx*T0)P7519+#0^k(HAt?Yb zpK3vGe)-J(rIVQBzR0_+c4r{$6sVl}9xJ$9t`hv&Gb3KN@Uff^Fk=-PQnb)jFC27+ zKR#hdQ0z5uyj~Eu5@0T6xpG44FZFn4s-U*<$a#=;1pFGDYkq66`jPMCadY{ort@*zroD1OG<0^Fu|gIK;TQT;0Q+?kionQltW~n*hom7w$+anzIq_DL!Mn z718Gu${F3dkR{BTKsROuZI?7l3tIwZA>JX8OR9dMtEEXSEDPV@j47GqHgya}qPO5KwwnDJqrM7erKaVdlbXN*o4bk}i zRH2NW+S~es!Ksg~iDTCdsNY$e5-gI>2+U1OaMDJOKZAZs#ApMtTbD;<$X%caI|iYx zH$f~)aWOT3NzT9T$`|4>@=FKS)Kh8>$$J~ik@jDS-TyQ2i@3R!t(JTv^5Ith{~Pps zoTVNkx{{D3OHHA6m9V_GEySi+XE`o`mcOBc`+H~7d&%TsEfT71W=VRX+0jNLuX4t% z>@04L?WwA=Zyn6uq_D17rNB4fnQxGUe(O2Tjz;mbjB6h@5K-QB&z;?*L7nLw=_5~< z<#ebRx8+;U;#Pq_hrvA${?frMs*+q4(YG053q$P-yk=~Oad~OQByP2xF|OE;W+0bl z>}*~2Bi3GXCu9r>(g9}GWTw?a^fzEec@jWY>1hdd+-IC*#F@M%7<77O{{f>ly5#R} zmMD4gv`XJlr!z8cGaA=O_}0%~!{O-6)#+rvsH86FeG?sZf<74zemkgFy*8x-*_A( zZ`pQw0-o?MsaJd!pA7Dt=TasN@dry+lNn%!05H7V22_gXdQ_Re{xW;_w)_792>CP9*; zL1~3F(g}}0>(@WV`xk=jAc`?mwZdT#zQ0^!0rWp8!pWJXmja8uA77jaivnch1_kDy z_c$E0WF5QVG&j$3ysnWmIRD*@hKf31X0Kt~y?3N4pK|rC0yFz*m~5WO>6|0Y%`63Q z%L|hFc$`BoK4G8l3AtZQ))gQY<=hquRcT@2{tH+#5~1m$G|6(mkr!z6YrJ{cH4T6G zHhz@;@>vj`kyCIIeyBv1RjBKqW)sqW3YVWk-_PBZ)NEn+Jf!_z55Yjw@HBk+jX$~a zjmN{|$B9MLyB$)v2Y^`%C)gPt%RtOJRqYm|_cZ=R&0Rsuz{tghb$Y0(j7X>v{AhQ$ zn%qKuIW2MGz{R<#E8 zUZo?Y8LNco`yJ4BO-=SM zx#ArW_`}Pay?j0*mpY@*#QXM-$&rB^@0>N2pI|CgSP(V2ZFXMyk!0p1IlUyeR8zP` zokqb&g43!C6Z)p&-2pMU>e9J+_eNK6;e4%AX|x$J z+6=>JARcm^bVpu*Ac?LzHunLGA4iLducemH62(QFjG& z;ZqS_QWSdVUf5-yy`s$&0k@+ko%^Qzkj3E3g}tUmh$5vGRa3B(p>;pNkm@lbh3}`0OQmtJrlbzu6;_!09{5jcIMyt!LQkP#($2rd9 z38FCv{3G*78X%yUz;NHq12m&-b=K*g?cH~0+>Z{aKXNcs4{Hu8dndknADrs`El^Wc z`Fzwzri<;$J@^<LKv_`-p$WvB4YC_>h%i&zT%cDW?s%J{XdU)+(O$DZh$dS~u%h?)C`@ZH-~dUV}TJpJ(< z&5_f%1|eMT$GoNFh>(X{?-L-E3^?mMQe@2i5O1%0teb0WDxs%qRNv6e|L{4y;d1`F zX$=W=RwVyK`B`FRmC~j@b(ppP$a~W&#l;5u;uEa!A?u$`+a3e*AYx;H0FAI0S7(S< z@$^yJ-cfX5inXe;HomJ95F}#x}p2?leY0rC| zu-o|F6(?KetFd8ee|bKg8+tmO@D~;+#DiS*GZWwrMj9I@(}s}oU!xfb>M)%8n!cG? 
zWP_G&NF);zH|oJ2*#k-7JWai;zL9${jNbrw?jXT zio1{LxMKYF+pLrCSm3zXRK~i)ocAhP=+WFPF z(V!TgAI{IKwFV+}e3gH_M_Nyhgg=z|ZY9GwdHTH?DCC6h=heQfsHI~WFNcPU(&^-z z13_sTEy$7OIDy2IX^L!zfk|@zNDfub;l;py?Svkcp3>b?*E`0XfTWmyT=y6W(pP}{ z|<@ zFH-*wTOE3g37?76g{JrBoWSV+XRJwd!OTmMR7cIhka`#0x22R{N?4g{advRp)3e}X z#t_HP!VADcuBysM_Zz_dN|R67Cm*d0Gs;z}pQ>%opU|Y~Eeo_}i77fv&w#w&O8I$S?{?bI6uWO#) ztNztUw4Z4T=et$=1#Eq{XBy9yOnr z(vVYvZ-+U?Jx3PaM!-l^CY(68nonMfiMGugo)aRbUR$S1tvlixo0tbMweI;|%jv<* z1hky`1iU}(I#P9w&P1R&wNl!3O}Oe^(1DNZxMNl5F@B59#C}kHmtS0?LiE790Aqj* zMngw1^PO@Qz94DSc>x0745*s$f3N{$!1cu0N3&s$KE*-xpUW3kvePaPV_X8gRw3%4 z5tP&piJD$VDnL(GBFReHFDUEfT%t2SLpJWzW(h7{^1LESHfEn%~Vr^yFO|C8Y=rUJuNoFrE`KShkWh|vSM zZ4M`*5AACA4n&k<5K_ma%7s zQm#vnoF_<2jhPJTDn8q^0OG4z(40QQn<_MAvgPDer@D{!QzMXemBE+4tiYv_8V9EP z$DSGU#M~+7uN6N)$%1@eM#j)smhca5gnKItDy80i&uUDk>#$$=)spC81D*6i$xgMS zvDrC-MCI6(gNwS?ps$qA2V^D+G*^KJWOkh<5R_#OsR8)s|MaAQ5#fv^nKgL4_d_(S zmvuzN3tf)9eGnpy-kQPAXV%P?5&)Klpi`4o^lB1oXOb!nnpL4 zoZ*Z?&4N+jBf`xraV<8n=}+C~NK6FF%r~?r)>PbKM2vhi(0C}T=YT;?){&6V{Q{xj zg;evz>$*L*6Rw9GIqurAB%*$SfyLS`mdsI6T6uAIpW0*WW>SV38c~}!r~XOmL44iX z+aA}^GS63Q4|^@F+Kr7p-<|QCWbz|rpO?spmUmKy&5)BrB@gk$FoqW1SiN6xzbBp<1IYyDr_*v= zU+xw#ZYg9k?Qx+@5G9RT!YYQi{s}&&Cde5{u2nV%4Vm+MNxMhaG}d3YKVM;wmf6*m zZ->CEI&K~{?Qt{}FU5;P`Z`5ZL9Q6Ll%^T;5sS>N_`!%I-)Zi9Y{Zyqkfg<>V`0C` zB~oyfuoryaO8XvOFU$T*v_Jx8LP)RB)Uhzl`S5AIglz;|qu%!iRjIE`>W%e^szNoP zrxh9zB|h72ye+^Bh^Q$`g|sEj05$gLtSFKY#^Uq)?d)Q*^W_X@HepU5yv0(?Fov`z z;|e(L&`4>gOEaec6UVzvJrKUq-}Wfb-Gy2B>GeN}pO9wi^O)iG!Fd{9BB8hwy}vcr z+5r*qimvHbmsgVm-PmKPz3X7C{yR43`rmP6{V9l?kogYd(?{uR`wRFskzCrZF(FDp z0Ga)=A9BBdx;-RK3!EoN#}>2oY*Dg2bzh~U{^xU1TJ)7vGGKJ!<8m|w#UX&1L)6O*P+o8x!mR@OXGaye;LY1THgbb!R0kW4BUvdRrcF_MQD;wF*{@v zD(&F^zJ=!wuq#Tro@YWNSkbW`?qV-j$AR$G!}8EP(6<$pMx@}No!LE9eNPk)c5k^~ z1v$MJZR;3}dDj9bh2GtV2%GdKE`A)l^DxvL5xXghzxhonT8|Y}ph5<`ex^3T ztKvlW{OlIi{C0PTEth-Ko0UrC%-_&2pPnR4kKt5bCt4lstUw!4!2L22Kw8n=E80ao zyeg5L!Y^w+x%LXsr)}%E2f?1*Ps!Pp<7)4SRO{Rie}iVkO$yHZgv(u&4Z5-Tzk*&? 
z(`K|iWbSJh3ewQPj^f253t4WQ(2KyW%1fyY*peP}NsYv-lXqr-vemj|bYQY!-0Tmv zO+`|fZJJ-VbnWs+RSQrviWVI;-AwwtE|##|FKpZ#rH$Z9n0W4MgRf(Dm%92~?k zcgkVjEk<>DIu@5B)dd)A0>^?FE9FT>!-ObKz^k#d+ft9DiPu|r(lc*Ci6MoBY_sQ$ z7H0pA(ft=Ht}{}rlm2|$aYrpLXo(ze>eB{AKS_LcZga@#{>3j<%%f#&PSxLU8Z_^kxqLshz2B;NgtER}&}cng z+0(G(ESYeo;xPX>c|2_2F7JUF4_}z=me;j?CmSb~P#Nyo>(|BMc#b#wGyZf8{fQQGa-WZmB>;y6QIpEfgXHag zq(`k#Fss;NL(=|eF-Eh}ZRJj@VvVXQI^$q;zs_k=ZkQ!~6GzV!|K>6ox6ha^cn$DF z4_zNTU*UiQtho0t-q%8Gj4CJ~{C@}fo?4ud#_vN~S;|R(k>q61!*irdZK+%{JjcP{ z{w?R~n}f1U2}Od(d&p9bbjrgvaO3@G6w~7;#N3SGHz8JD5x3&JGjm>&$}~~ZSfuJ% zyR_r$+?Eu};ckHV)NNIl>#dJKR(_+y2rOM=H6LkMo*M6d zr`)EyY|qgw=Yteh$hQ@`1Kk~fUDtnL0(1CdGgVyq&ZG`33(Vm+t@;QijzpBRYreWL zH4G=B22$xXY_&yfH(b>EZ68-X5-bG5Na$^P4xjWry>A{%vSNM{o53dVQg-3_fp0x# zJbY#o5y&IcQn9t0L(4Zq&rshr+&&t6Lz!N548+9F{2NltOEHrlA^I{8iO@szEufcY z?t~C-DH0?wDT&Vg$~fRC7qLVhN|E0PIV$g#(;frTuN{{ywe;cG+R^Wld z7ar5%SQ~xCA6?`3)Lm*@`LlnSNOGaegU_kfW?V%!Ho~}-K5gx@R>C{t)H5At?n)kh zj;y-PI51jMuKNks;ns}bvL7N)(nHPt2C*V@qx|x5k6FXRsc=P;b?exQN1_dv2zBK( zP6n^fQgP5{r37=rHD0kV{~~jKS`iLC2l755>$kwMon-oGz9nO&QLV(!a98_=+`|#&3i!j>w zh}=$4Z-Ys(eHS{hYi__D?|4$J`q1gHq`EW0UsKzknvVEEprdMkIU{n!V|prTV=0o^ z4AC|g0COL^7>60!doR^gVX>=W#61<2fv6XlCE^AhPSu4PPx(~sqb9&wvmlp%9enUH zYecd%6x(UfsD>w{`W@oO@2R#TjN`832#MVZGffspLw}GYY0-d+vPOgJ{oh0kK79XHlZSjL8g?S)Yhw1kDlr_`z)= za{F`i22QloBLQYc`#iw*K>`!|>UZkN(?2iN>-97`-H>|qypi!ldW766pCKvuMe)k^ z?l{lagK9-p17KHB4rhU1IxENfxTXd%$e_usdc-^%{6*m`lC*ia65M)JMOz6pkkr3- zVfz!Xjc-L>GN%2k(k?&K(9!m%ip=7c$f?-X#8E*16qVaAppQIRHbv1ytqy0*aD>9b zXOaGH4$qwT^%(CZj)ct>1h- zOsDsQm;I9;&m?(fmv^xBivlpC^4s+w6^mY&^^xU%T?DY%op9&A2fL-uBb$P;(rr$6 zhaB+S#VoO8Pv`4r&n90jJ9i`hvncd)Cq3!MOZf}W;yDlPVQ95?Mv zGY7SxNiFbZYXIrIRB3Kh1nyU3M`=4p6p0ZbUU8y)$yiAJuk89>{`+6g+9-c=PzhNT zh;zQQNV)rlzAWC@0%4WmbJan)p;GHq5`d0l*Ua9ibP~@x^9|)n$3d4qnYF8ZdT*|v z?^XS#`$X?Yo<}XBJwaVv6CpWiq#Y^Qq_cRlbOy2P^Mp{x*5~bh`=i@|2eeI#w%gyO zkV2+J4-KUd$!;BO-z~oExk%V|l<0GtwmzO&X0%3fQR1A=5i0z16J>GA*;Y`L5J~l3 zOe8W--43}3F*3Db&ByI5omlN&iX5Co!-&&wd(fKQApjxH(HRB;{|CvsR8FwGzl9Vss=n`A z^H3(yudnx)`>kmL1VOC<$J1I6ZJ_e~mG?qxNoz%ifrSvWs3@$ViPHO#@+!44G00wi zVC_qlx0U1tI#2@bDs%aQ{~a91wR%wcC+&+3`!5PZ(Ii&pC!#ADg$HTqlp{^beZ7ot zY{K2-Ma|t8&3S|Q*C9P2U==hqgO-cy78HnWPbSwR)z$C%$eJ}#3G#rp`skWr)t{r~ z1B@>g6X%C**l*AJfq8m^R+5WESTFyQp(Oh4(dIe+`QS8UdRDz9=q0_*(*u@$z>SCj zmm6;O;I#6K9}zT}soQHnBzU3oZDWZMV=I-nn&~MYSFsVH6sy;r8)8N#sV0fQcV+xZ z)K5j5zeAJ^_m{@wN^c<67_{+n0I%`+mQm7!!L1iQl_6U9yUnuXnuh#zCCyF+HNlJs z_=@My+c`@!5R~eD@=W|wuI%x42+u+AncE{E{}(6UbIq`u=e$%q;b0R% z=xw9en1P(~QX7@|O4Q5NA#$^wK0(9Ltg{TS(-zUu`jB}Jhnv- z_ptFVCWBHK#O_kZc^cIXAzfP29*~`p#*LKZQS_T{wqXoM1YZ6XTvcT_iS%Gq?U)U} zZ_^N@~?+{4PZ4BOn>TM*J@3b8%8NHmTH%xnvv5D0q z)xBaJ80DcSpsZ_k50fSKNwoPLM7THK>OW?_zlOpHV0fd?_|xUKT|@RP@*{1KQ?Sd5 ziqN^CaTVX5!KR=im4c_=+Hq&T0S)9Kk)AUAj!6y4*tt#(YOBQNJH9{4=yp^!;S=Ua zUMofqeM#2_h-5t5)efYvCVeKICFzb|!7lsz2r4vM%vmv)U zo~Kf|_Iv39eo|DmMo>N+S`}a+$vAb6{gA@CMktb<7J@ef4#_Y!)OtviuQHYIFylT< zuW!F+?BNb3?KAhunvv_fS+B%rDXAd>gD6RZx3sM&D`V-&KS@Sk2F&6~bQw1x^;(Ra4Vw8>J)Rv?SVb zVl6pS=6&Fk52cen--!@v`4E!Nh=B<$X1o%e)9=8+2G+E+Q1<^kB$GMMvC(W1LG~F?wR?`|C4@h^u3n7HvAX$4-0kjhsCS`vrdLWUp0z zEF+ABw=$M~InPu%l!C0$rbo#2$@<=X|9P!dqQ-1(TEQV`66K6sw)ysIrR+AAX18wE z_-Gf$H~Hh+G^CR> zGnQKf<&|J}TO-?`#5~~ET0wbpUTjuI7>Q_YWGj{i7TRXqRvJF0^^0Djo+h=810uyvamTGFSmVj9quW}Sa~r<9I#_X3MbDrlM+ zS*o5&ZBjb*;mEx7>~+vwu|?<+GsC*VL-qmSiQy+4sxFir{2Sbtnr!9YdE$c`>*gYU1KoJAzv#hwBNkgVc!zSUCUWgqC{4rXvL(7nFB{j2FqVWIE`(3toWeA3|+ z<2LdG(w`c&qI1F#lwe#Tk(@)2cTj&q2!F}M*iC04FN04+(qCC z&Lbs?PSZAHKKPknZs>C$z}@ysgKu9=%!czZ&9Z5x!3zHp{ez9c{BvzUA9)P@iv+1( z;{N-{1{MF!5}FdhbPU(m2@>C@t$(DLT-22xD190H{ONAyW0^vo%B;b0oV2dZ(T+G$ 
zSw`8?)^r-e3;6a%xIJYY2kr8#pF{+$Z_bfBd?}DzXI7oXp7=;j<=K}MP@bSYSGdD0 zk@$TH$l;quOSzxmPQTc-_fG$8dISbDpMs$enw-L7m>}8@y00_#wc;FSUt}Z^4l)UC zk3YwT0;h$a1J~ZwQ34~`*~dgefq5I=kDKCF>aEDlyIwe8`KD|yDzziDLF!(XS_$Q- z_386=FMpoO@Zmif6Yw%Mc`-!#4)Wy@qSR-dpxx&oI7QktdjIZkhCAlHylN$FWbtdr z&+ZMID^c`LAvQBCl-l${8!+4um|?Ubf=~mAMGzNz2tYs9ml2hq@5SIOy&UGbFU)GhscS zkap$J=LBau!@>p+2LCKbPP=w{~`G>96H? zMj#dZoZgnc%k2UPH~a;lO^u`nKPU1FV!Xd^WtFG5j4H7=>U{vEtU#vl&$qg+6-pw> z4}1V;rzjYMFN%SV9U!k$&#VTG#F{x4px#Ic2|}xz5WgM6u#f@)H)A($*>7i-MAgBU zeD8V+`UpEckZ8p%uQ?0>X8FtP(l74T38kI(uCU^@Xp|t&EYaInCMmMaQT6lKyyPpS z+gUiUoPnK~FuR+)cx%`aoLiZ}4v)<`50f+faBX%7hRW}JKvK+_jwRTY5K1dV_oFSt zF~bvmjF2Cvu_BG1%Klm1`+XJ(Z!j3Q!bK}SaFfeM7I)gi2+P<4&&VT%cc~#Ea35QB z^+m;(5fgo}*?K?^4dqRCnaRv!-SRU%!*{y*!+O__Tznr8Vk^@yeJVA;V(I)+$cigX zn8$DC0J6N0Bu0Oq!Z2E4KVEF8d$+y+j_F~b55Hlat8mmjudvbN7x*eT7l%~fG4r4! z5e7T^Q2DK+WczJ=(5aCY?LzwVUWlycME6@{6I^FTql(% zRsL#r*w5$g!v{KMOxR#AEgP0IvRh7&qg3WAg-csExMl5yVS8QTAvbSnKY_1i-j>aG z)BR*{6P6Jt!i3d3jRZ$vgsD=%X0o9eZ>)sV2gGliA3^f*Zw!9gyIw_^9jIc$X&xr`H~h6>2Ti&VeIfZ`CS5n^XP@ z%_Ml=h9~)Zi2k3mgwfmQ)7(Q_&py5#V&56YzcOhbQ2yoO?3a{r+r_I=Z&VyPmA^zF zy4WN~Do_9RGWbrPPP$kgcB~t7mm-=>C*0qgoM1$wC_Hv^!#kJ^itN<#=G<&^{W$%P zhd>o|wT!NLpJYExf87d*Bpv$Y+t%0<=~S2ZfFN^wNc$ag^Rw=gS8`N83}p8*KszA9 zD~#Byhl>IWjg5gnp_kf;KH0IYdYK$PdAHG})ua7Uom*c|uXj=Z0I?)AA;Kd`U+~d0 zy`>0@^z04&d{c0*@it&On!+vBNw;~0k45Z0wI+#iW^?$2|MyM*Z&wG?mSx^GZ0u*& zWc0G>zLuYl2OV5|&o!#E*wG&k)A*>Arbooh3a}F=7=42W-pVPyCjG?EKb7UNi_7_LF5G64Vnpxs&_isJ&-~tak_GL8)NCBRVvL!dtZRr z0EXJ!dEtg@1%eh{Wr(gbCT6rax8O6gk?I1-?hwk9&gx;5c%mkHX%$8+gL)V%P4z~7 za8x>=I-)F2HY1*U$q1f*Ct7fUDY?kh_-<82Qz(AcLPuIe)o4D<(yV6uF>W}PkuI4q z_fLEm&wKt6KRljJG#j<76L zk^b~?l*n5)gk0NoA)23eBJ~~61O^v;Ao}YIaeYG>a@@-}~8Dvd&DQkKO?@6Bi04HY1u8f+nUd12A zeIM^KKi<2pQ7ny_>qgj^wE;xQu<-`AXCX}TGphX z9AY)apHiPEFlwa?)Xg~Q?_~VVve4tLUQ~ww?iEy#i zPhrx}!s1nN*{My#!?hRwY3M9%H6Qx(?V~r;-XnZp*j7KaHk$c~*1c99c($V{*HY;maEt^(1QN%4E+U|~?o!Q_ zD?t1=;MD`$-y#xG;!I2+vP|V}6yrrEqOqrntLC#zOGIAlzt8f67lS{o=wa7&DZ82CJ^YMa;D%=mpDuK*&>q|xkdKh`pO1&A(X6N1-V0T4J|dp*ApKXZ=N?O_}5 zO}`jNTn1`GSolQ-XlN?dW3pntgRQVElc`cY)5BuQwR+{Z!l8w?u&IZ@d&^Q+nCIvA z%HFeS$@KXnSEC5GqW9wGepFeOm({%iZhR=-PZfBkCQda5za}Tm?@nqre?wOIzhVoM z+b!6UC&3L!+2@g=W3Z%;1v^)_cGLsLA-rT*`cq!m#xE8c!*i#+yS*F*U4Om9#KIJ$W{|T<)4x(^g|IbJE$|_!{x*fQn`xk$te)r|9uB~ z;Yl;@gwNG)o)ycz13}7F1y%~Ti)Xw2$Tkt0M+5&CYwsD=#J{%x(xeKZC`C#T6zL#U zX#qh{ih%UqBy><|0)fy$rHDuu0-_X=CRKV-QCd(s2%$(X2|a<3oI!tkKhJ*tWu3Fu zdEr8mNhUL7=6jdVb=_0oTi}ZA3?!bz(-@SV`ToE0nh z!XCQLtB>WY9m>o-S%G);IEXX(LntGf$kv8&tIgdVBbwO+tlf@4EWX9c&pfGX8^1~f zuBLIoOruPRXv!bqMFCwqnmY(VbC;^2!FK{RG*E&vw>3m&n0~Bt0!z~E(9vFSY7wKI zkJQLZLX^@EXd&({kXA{LcIgwAlrANvK%ge`(;Q4!cTzt^`^*40Na&?X{Me)u_C(y6 z8~^I>L&8x=qLM@uuzn|4cQtsQy#Wn`KLb;41r)DzyO7_m!Zy+|r_!kyX&_7wmR)Ss zwt=^5*IQ7uZ*Cy&!0-)^`f8ulW6U4$Zf)7QNb~K_Cwq?fEf2W{@Gve@QAV`ZTD^(e zZFxP_)!{c}%Sx<-?33c*(MI~GPX40{I-?+oCWFvJX~ctwJ-7A9y=i5_GrL)6)VfEA znN^tn_^H6^Y~&H^RMaU?{_wmJ;wLxHL$1)f=u#5AlLXi}1Tj<4cGQZVcx--a4eJqi z0X1xY@V#5%}g_^x)vtC#^1_TYY&FuTi+cb^{>NLT$VX zM~6&0Lq5R9H^h#f+hKn-VoyB)wm9CxGdcd1`xD}3hznIuiWd22?Ap7m`?b+DBMQxW zb#;RQlp8C;Z!6>rZx(8}bw%G&6uA(V*XRaI!WK2$}(#+e-SszJA}LA^`^qv%Q(Ix2EEHTtuO&Rbhe{I{rl z2fjV#`Td2Yis8zhRO{U;x{eLiYM;V@GV8`LlvBL;MT?TcBj6@^t2{II}w8_K#MlZ ztHVx*o|5L0`HAg^u40?pBhbT$Hfu&>-Q3q0jI zZ9gcd`;eIK7`*mPWpuiEs{VShV~96WE`+GBe31M@Wlxh^<7s=7ruD)Mid%vzR8j^yQA+Hh*%M&X%789ROW@{rTDi9Fm z7yfSQfwyiS{PORuC182a(;VMuupAaT=YX&>LEz5imu zgHYh>UGPW?Jhd!bG_q&Tz(SoT-Osl-UsHY|w}dWJfZH~2#Iy4pYL##JA05Q|OT6$z z8nB8YoHn9z&pjRZ76AFC^fos6rj@J;S}C6#dP$WrPl4kWp&Zis9cYky!6>G;Ldc-! 
zSC4^2EsF^ZWmSDhsDuva+7iVYtX(#I3kJaxWo^>)KW&UcA9wnN7faibmg4n2{?ke_R{<#i9I%y9|C} z`|3K-)pz9ERaDF-^tI(-f!&_w~9U@ zU2l14pWI6L7)G?jS>%`$=fORyfNJZWaFUt@J^>hz+?en9>p3&W=Iz^FS49i>K=a8@ z(UDv;{gxfrBNs)C>SkO5tP zs!TiQdz+_=O$Dj>2hv&88Zqr+7TyIm4wCD-O*u~ohPb;DKhq34~p)Lh-)d|60%zS74DHNrk81vGU2 zc4);(COrX7Mb3R>;MpVjSZWmm;@_Jm*wvO4LJ12Za8L3reRME0j!w}vK=pZWfW6gc z;_qvaq$MDLSQ$P*1@$1ka%N^gzjq}Fu&;idKXc-+X?5n-!&s{QBJAKgeRLYV}(YO6zF3{N1gcr{F*53q|K zIz(WV@tIJ~F*yPdKci<#*;qn^rV-)JE++~g%(&@;U9ZC;27UXrQd8Vmv!+&#Jwf&a6u zEe%Z3oP?M(=+S-?rr&k67r_VDXZnVuh3J0X0xEV?|Lvt_Byi+VQg1a&GC{kk{V~rC z&m?ulk(x}DbD6JfNB($TQV2xjA*M&(_lXw~}wg*;ml8<~aLU&rMK7CWEr_t9{0)Mq+}+Z=4Ui@t&J} zgdq6Dy^^(!KGQD1Z~(6(zTGha<_bYfKgD}g0Ps_tuZP`dNFd?nDsiwKZ^wg3Ix(Gj zS|gNC$fnvJT!Q1Bz@7DjnL)wJ2bC>H3@4$c<hVGr9M*R^$hp7)?N&Is?aE?`)NJDey0nF|R!Cx3= zwmVxH-Cj)n+l2-1Rv;0dqdCE*-S~cR@Sf6}j|u3Gn0k#)SM1voU2LBHU4bV+-JZIf z4`=v^nP83!IqdNJ!XPUt$(R-sTVkDI(^bkbsA;+CqwA5Vdj7 zdZQ5;w}H?Ami8r(m^$v|b$j=&vkl}fR#XiCR@vBC0N)}CtnBk5&ry)t=>V>dAI%#) zqcwZB{u@mqqr1J2td<%Neu#|fgPU)OHLY;fxh_q*&$mA8H=)D8(!VQmV$}$5v~8!6 zQf+C4_u-w&E+)OYQ^%{_bs**(B*%6~wt9J=H*V%{(Jj z^JKtd)uBy^LF|+IfxU5)NU~OAs$Oz-T$A?$7!R00B#AsW!}olV-GgIJ;dPyUQ(E(8 zfwSITRaySu+OctLjzfgEsf3*1(>nDZ2N3*__B7KY+EzAIY~a-qK5n6iKnOc&yJlVf=A&T?JRg8_ zhjLfCrM%t@C@S+4b&ZWSL2PyftqPEIOsoE4VHB6x4e2-M+pf*Wu@(;HA>(p4y9eI^ zjYA`ToR?QQ(8M=Zrvd&SrXs471C(^p(o6jKA;$|-#jM~lR15?e(&RQ3QjMRmz^?h# z-^F$vsTiqrfy6WmTG_*hDKT=heRRWBwV_45aHC54kBTSmulB*vhUd zKOad{W-H|kW_3JHYI9y6YtPBC(pEN z4oMUm#$7bG8~^ltO44gF!ppQ*F2IsNpXM~*TVowZZQE_VITAidpDUlsw&AyO8+mW$ zQFXwI_OOLv|7jrY7SQaf(d(+rCC(o0D)Uu^yF|jw)8!Il!6LPEklaDJ(QY5e5t*zH zpyhe1^{zQzlJnhQuP5&WMatU4!a2RUpeNtXy{)FVVbg5hXrbYmz8d&`&#I`--X6Ad zF=M2kxE&}iDNSnm@Gjrn5oGTb{X+X9H~f8GNI zu(V4gpox??pa#?YxRRKQ#`}7LDs~i6gr=!Fr_`y|y@vH+#27cn!(R_)Ob!X4_Fe@( zR&_0nZ2co5SY_vWOCTH@2WQ-YU<>3tSN1uETTkHFA>RPjFF+$8Z0%1xQ>8`GA3w2* zW{SA<f|M1-NE~PnPd+K#>15+%2`U3x2@03sY z(NP|U3@*u@qo>j_V<4tv+d);8I$Gg6MRCrZX?$kM52C>2LtQ#pAwDc5S9w@tMpcJs|pZ zfa*x#g@+vV*N*EHA&*W&xvj@Yq1;$*p4hoc8djk1lW5zZw!9czNpfB#N9v=|Y*KJ# zL~_ItD|L&Vr8IG4rEWU>w0hjO>aY@qLwLS8J?s7qV#|)^p%bps+-!_?$5Z2k@4En< zYv-Ra^Eqg=Rpc>l7)4&gQB-;~5DE0gD&aGQpy`c8m^Lx_tYLEeGi;hr?G)#SlL>`< z#QG82!#ZInxyh};-xlFJV265&{zh!!Dgl9KHLou{cQhb)a#pj+F+sjQC(S2k7E0Kg zg>Y>qa5K%!=S)@|fv~}v6vtIib;l%2t7&5^4-DkH$-34Tr@7Q<1hPRb=`*W%ybJ!xaIAgnk)FHU)rk{Z|xFqZW3a0P^Whl@CP-_=@rtWM1f_ZF>hS^*u(+$fRiiE96uHSv+qY4w&xx~Rh!Z`xGdnWMIYyh9c&?+-LJ$o3( zL!6tF4w$^M{vw=6?6?<|Rm}gqw`-#vK3eHX6*23?mjhpi6P-L)e29nYCYA8XkDLQX z_F6I0zF$K|38Bqmc2vs_HSVh)Tn)!<8g8WOuW(inpL*?m;YWtxeFtx{Y+cKRJ@z9y zSP0s8c+w13{*OI_pP5dmWEwXV>3 zy9VVHoQKHKi@^IsA5ChFFXCAxH{OPfw;pS`FQ6;;IAX2nOs3RU_4xAKfQPDvwyj@Dax&*C0j5h%n(){UbPblnrCK-MYfY zM0}~kI=AN!e8c#sDQDoT9XFQBi|5+7F1M$jpuVBnO#!Q!DsCo|Jh)vOSd1+pYBBPD z26*~}>LW4kIf8@UInyKE3@inR9R`+-hMIa3>i2bWkVj_^0O|)QfIHnR2pR#@5SIt8 z0o$4anujigAS{zD643|49ZKd9T!hxJ4jrk*a(DNhIs4JSFlR~N%Ni~0|8*q~S zYJ{sJ`C2EX1C$!)6zrZ+L&=81&Ra#QDxBM_iNsd)570hT3+=8Ubb)Y~=?>is<^)=( zo+!24x7Z|_R-yV{{Hftp6}aGErNd$t+RK!i^|9_&InVToTvCVwfz;Kk692L@gdXU_%~Vg()~U|CW1U^hSW*PGaEIEI(fjPl+1- z_0QMLS7E@_-_V3vqp0C_wK26UP7Bl>iXAJE%Arh*L~OmBRt01QZ9p%`$_DGF{b?>C zPOEZAn#VQks<|VLBG1^|PgR8uZ?YvnRfE^u(?{J>nr#RbBl}Y!pL!CnD_fE1&IIyw z*04}mtKs2&yqHbTPitkT$n?i#X;^$gGzeTUD%`B0$4=NEe%0h~*FZz~?5{lD-IGN# zjMppVmrc>{6*nwgZa}uH4nz&Sj|0pC&cT7^h4<%AJh}Hs=O12CIG6Cq@8r6rW{+5L zluHdhiB2>p|0B>shJEdgQ|7yen0$ZisYNl41wdBs9S5U7RDO%^6`L1p%N)@N?IOK{ zy@OXxtTRl1T5H2u->%C5o~*BLdc7arh!;!;=R~79SO}I~fe0G;`C{o5C)e!P_=l{~ zDwYq0gPHKgQeJ;KeEvIv*}!h?;B--xrVsJ%I3g| z1aO*w3f$peyV5&P*k98Y`*gkDS2h*w3~>9#DyV50h55=+r9Ya1#3W#9V&=OnYte9l0OUWfd7vcegP>;iGnC8G|GjODwVGyric{|wmvZ94R4GE9NxW^B=N 
z3!GOT_?noCCRxmWQnGyiKge`{EsaJC|3~-_Etn{c)ojKcs>Aao=eX%cIyzL}C?S8c zhovvP9J88FN*KvRPTCLybaW%LNQXdx#*bx1M!rQuqN0KoABr!$4aQ>wrj z*8mn7xi|&}P!`kXhknt-v+;;izj5CVC6n8F=*_TQK8*10SX==8W-~=dX(6JF#}5HS~i=CS=Z&u^LrY-X4q{df9wdZBg0wvCHnI; z(-}SfUrz__?je*vxIjAENE=b=5@?vc+J<2G7THq3Tjg^@t--Y|jESFD)E+BqBd9)m zby#B;V?llLoe{arBFOu#iu=&<#3ux6zNNlthc%78t`+SYS7h?A!^O!ZSN}oNy8>4n z{{^G#s5#!q*~c()##BW0d%ta_Q3yGRB&k-mUvyxvzDMffj>1vfVH4+ud6=5`yMzw# zC%Y(!Vt8xWw8UYmXomZ+&~u+zYO~o&IKQ?sG=VA1Q^`2WDc+u-}+$1#SG=%D13DfE!lM{J!b&8WOit%-5VyvB>8)*K-#SUnu-leh2TFMv`Go{5AKKS&L$7#_>q3n!r zU&qV{OwROAu@msVNTa=}m709Xb162G7F3})Qs*!XTl#baFEB){@^##RKsTdx0O!Sx zcmDe&LAhf(IQDc5hZ*)@7oe~Er8m!jR>;OF>22}}Wk3!c1$xcIq;T%3b!S5S5x@nK zc;qalcER$iF=~g8W6-(fn!g0p3ES(Xfu$s1O@gq)jr2P0Rw;q_vQYs@ES%5gI|3v9j} z*Q*;^0~V)Cn+Y*#&v?wTiFH3&N_*}p%vE-khK^5h;#ziXy>IMdmHB0SF2Tb34 zv8)E2r)y{Wi@HP5r+v>kQ@X(vkX7NO4Ox*)C^&1!!>I^m@0GDmP#A(o*IVw!Fs0X(eRKT@+{r zvxV7iy#mA&qC+)&3JT);o40q=cto84O}`_g@TBIEZZk{lH`?5`z639>sj<@|BY!Z$ zY%bq>T0Qg6O6|L&soptNPhP6uo2 z&b3pFC>lwG)wr98LI5$QzIT)fSdw>YbScfm%wOgI8&40&$l9+1*kFe;-|?nz-T~e> z28~F%UO#tI{CwBF*!c96{2Vt^{}+K%cO@;zpZ;Dn{&p`UxlT!w*BSl z1wdf3&rReD%%jaGq^uue?LRU|Zm8BN{vuF_t9nIE-^iX#%!>=K|E&yq=u_86%se*V zPG@8${9dS(4qKoKn7kfZP4W?gthTa>|=$%RNBcyFI-U&wufIaYQZbX{P2* zDSuZvvJw#1l$3d~|Bg|urXk#71nR9DBsI2QpJG_SFnm;s^jjQ;6l{Ddb?v zF{xWY37wqkEL}PvL`Xq3KChWK$)I|K_6Lu5|AEz?6VH(eXM_L3@u*VSK)0f*FR+-| zIt=BoMBwoC-Dk9Y z9PGZuIB+E>aEts8p#I(nP4!zBCdz`#uA7PO@F0E}Qn+j{nap8eK`>rXHlib70+TJ~n{i`iuA(@F1UVDgz$-o+N&JEQ(Jy=y`95;KO%%wC(}RxoevB&VKl)yA z8Lx?PNOgxhEjr@vEm^k%^s=vT576X=YP|X~E{r~|1B$(&&>~4D!GH1f*+}>!pUSDW zW8Jz_fCJm{NCQ!Ky{iuknH=va`r6GQ8m8rET5n)8uK7vdbAvGk-f~s z_xb_KT%n16^2w^|>&HefIHOQgMHy66U&-dCDxxO45W;xHflcZsg!{;MM!pC*0Q=v; z5^km~y7Lfa>Sc`nf%X@lSGow)mgIS#`V7~8+{~}uIel;@3=sSq?|w<5rpDgS&q%s_ zTW8T=2>ZF3pWT`=$CgKHd^m1rDF7L_t{U>7P_+&dgJM5(n((M^kV!Sg<9lvaEVsk+ z-3kg6J`1&6kN`t<^NM_L0Qw7nginn9H;#{Ci?>T3luOsgeXOv<3S5*`48x7em3hmx zXGfz~ZoSQ5YP)xPfcDSJfOLuTTGIQ_oJzxKd)CiQaKlQldEtl-=MH|;i5PhZJOMbb zotBh+mN9Lse(-?5=gwb%pW3eY4KxNZsF{@Vm9ssBtv@>R`Nq*+%1&d;Aa8;nsz_c& zF6WX_{$#FARhGnFRN-9fT&>Czj3w0vi-%U&4}_69)Q^S>2*@w_H_*Ika}VdBvJi+$?8 zQk6)2YMDxyc7X+!Hsirh9clmnoy8}t7_&k^_;JQaZh`zfAH|9;pe^NO?x4Q0V=S115iL2-#*Gq%w??QxPC6>5eLtC+AB}0ek_~p!_>!1^%U29oKJsm8 z(P_yqH&fH<>IY4M#c@M-LV=yn>O(a~wp)(z4)(#K@?^oYxs|)vWiE9KspV-@ZP{~0 zDcOw5bLXd3fh%R@3SiOx5`8Vk2NHt>@Wk;!;%QE|I|}XH#SrfT#w97Vp;crbUr+!a zSM7BMXto&NHQO|3D_&?gZ9|rvrUm=7;f^!0LKlNh0r!K~PJ%kLs4!ZD8w}%M@%r}B z>u$1c#yerc>>NmP^C1Q zFZQ*qciC>vp`Vts0EfV(-sDi##KjE`rdk=J9lU20rY0xr7k+HabQ6XIOuzuir93~L zT~P&F*-~YwJmaqtAj~!OI5F#-g96xgGM*dM#Q#P=P@`kgPZxMx*89P!-%-$dT9p3C zm%#52Zri2wO;n<;rd|yFM|Thnc8dA?SzZpHBd%tdu1_+0|5l{aHOilua5ZkBH_ee^ zE14~-M^h<+Wsy_7U&C6fuX8ui&+Ge#A-6|wD7qjEYz~9!+`IPBW!NKR2FA&=KW>iA zU*pQfx*78@liT7(IiQf7rij|arQx5t;s&tK33A2Z9~rQ_GD(knc%>CPG@WhjClb6H zNTIXoH)DhHEwNqWE5W$rO0-EaazvW%y)yAG2b+9&YoAEJH*_Bj7PQQM+WdBkhQ7SU z-#+$3TUno>uzT-&m9~3LOu{*vO$e50T|q^i+~C;ey!!qqCoU4ocuv^ z=?>lXWKCfD$Q$lI(?|3eImnu;t}rCriVLr-@BHS+av?VVBaNs!&!zzz1Dp9+rxRwl zZ=>olOXk=78X8^zdPrOVYVgu7xekC!5u8x>6Akv%23Nivo;U)%&49kKUcGH3&dYJN zcAxsk*tfjFzp4V`6)Qjq5VpSC+>9kRQL&XLDs#+S9Qt$dA$o0j6q#V_CcoQj{B|$hoZNXRwDE(XNkk?O#aPBgC zbI%Jl0d~B5bemnKo`3whYQJV}v7BJ+j~xN*<~lo^_2D7eL;=VHV0eq`GyEj(H&j?{ z^86G~A(jB2^`E!c!w&7cp$*rUb9|?~`5=$9N0?NeQvO`OsLcFqy=d2sp!AF1QH8U! 
z0%W-xOZ7q>yh5kuc^92#Pdq@-F`c60LWktTr7!wtVgq9ieAKj04?~noFw&$_ zDWrJAwMs|wT*$2_&L7Alq1shTIkx4s<0xOAdU$@<-JMUpjbY;R!&(pi^#NXD7L7}w zyAq*u8|zv}xjvB{3XBPbqXTOEGiPD~>-@>2#|@l}%_K4-<0sVEt@~46Nusd%js0X1 zNR)50jVbu@od05H4>m2AY2M=l zV1we8H~Yad`9`4HgYzUtRqR?N%~G`Z`!6yz->ZM}=ou~M{U{QpTH_rRIS7RP{MafP z7C>r+*iTroyHKZUwmG&QS9%wJ!%=@qs1tC32M6AF4mh2v^BHS$Yh{X;MWm{8GVwEcx@P|^H2@^*1+>nnVJBiHhy9Z`z|4I4d zV|ObFwQhz+KGg>UA;V?F-Qzuata>fVRGRA*P%ykRA!(T?kI;v7<%3lPQ}!WspeH^?h} zkOwFA06VJbt6yhxsBvqAk<|^CXxbwUxpeAcd0)K`R{ohtQx@PYtMnUnvA?!i8bzl<2Tor z2zILs6t8JWbD8v4v*>HDump%^(w9AwpsuH2ImU6ChH$gi&m>+Plt^g+axh*9_I`|Q z`?dL+8cYo2CQkkcZy?rq3#ci~EEKKP`>Ol^i}tr{AH5c7KR=4?}ltn-u(8 zpkkI4jRbP5&-%;EZT^6dL)Kj^$G>TLF(UQ{>A|KG-NS$`LYvMjh26{z$tsssV|yLR zANTbnetaF>#wc8|Kcw)#Wsp16-o^Rh;iCYJrI7PxZTcp%nBxs#HT;L1ZvyT&{W6yb ztW|ATE8 zKAax2si+9UqOM~@R=&Oy5d09;!>-Dc5Y-nf1LM*db5Z^{Bhu4KzQ6ipw^QoZ+llH; zTk@`k`qIyFDk;!F0iHVJ!+V#JZXRV^zArz#IwPYi zJXM7LcjWk0d@!!E{kSFga0guGbp*esO#DoCusmcr7Ni=Y+IWx5M=rV!QlQD}qrjtS z>!5SJpcRR#n&Nsj=Bz^1)-&6CG#Q3k8SzcFi$qDck=;KX|GWGDz|*hdt9N~D8qCGk zEK0UFt%?-+g$oCwP}AB{!Jl_3az=$sVpi>`E=IyCkBBA;7?FfP9}*O!Gax*M%70R^W?`J2l)0 zo(WZUoT0liE&b|~f-Q!fyhrqor)K?$+VW*tSpOstDrjQHE76A1EMkE~g z9j&JM1vjPo++PI)Cs}6?81}UoU992kvCHWZJsG4Gq02OfM}dOPv=c;OGs^m469l4<1)fFK9Bwdqz38|F40U5zl0jZMSTP&W4u z_^L72tFR1qI^q|X@sh10xb2qt=~aKf@yBD69SY~Ll%r5N@&K+(=Pb{jNcrxFcLhA6 zeGf7oNO{H8Ty1GtY;O#?8P>i9g4wp-z}Wk7IDH}n1Ml|-Nq2MH@v#bgXX%*k+*DIJ zOc~|&iM#b}5`(1r9DCFt)P}f471bdl-dZ?p00PwP(n;|>a{I19C0c}$Jcj$ta}Ahc z2I64i2+fbDDi|fgezi!KR-F)%=n63Jf~mw4`Ltl9oA%6O9fSLfXX+nUSnHJwqGGdO zdNnIAz*Rg=XH@j5oV6&o4D{_ zHv9qR`hZI7Tg(pmy|5t#cjf!PuWPndPMk5}VV{dp#)0A&kqvH#Ygc=|fij>#HS$t4 z@*_YW=xT>~aQ2gUiY5Bg{NWa8@Z-tg`ynS423BUFwp%k8@`JB*pd5yCa(ko-^qwmb zllj@;p-+qf0#CT6O#-!w4(XLxo6>*HY$we55*-TTv;yI1~KGa24-q}+#vC7*=OKpf1pxD zuk}6T@YL#2Z4143PKR8be&N;*`w0K4P>C4+`#^hvONwRfuBbGNmT$(s`27YIS{-Rw z;$NGkll_-BVZZqKeBFwc_sWEJGZ*kYtMRTs_-oV>fB(Pc-YJs0>vnBt@yz}K4rlq1R(1G@m-#RC_aPLcWb&p zERRAKU+Ke9f1mj`3vP;a;PaN~1S*{kQ`N`1uW7E=E<%Uh82#7{id7p$S-kG)abK9- z)%6C`DAFxl9kx)b5&qDASVNHAwfptx`BV@J@}Ap+V9^R-&`a`78!Kn*y8IM!EQt8& ze@nG1e#cg^LI&dV=P))mq!&aL70Jl^Pi(>!xX!i5{A7F(n}3+kU}`isiSvM90t?r= z`Kv`=V6Jh)w}#7hKKb@V+$6lrdoGMzTH1VSq8CDN`WIwB{-A9;p1qoz$uLB3nd)hy z@_fkT!-KfL$A9KUznsm%6nsH}!dI_~J+hsVnt`mlyC+U}Xrf_*eEpU(8y6*nYcz?} z9ezQn?%G+4yWQc`Qr2mVgOCsp#OTFcfwxyK+rEBwUGvY$)x#9WwDEdha)9$*IirvN zvgQ#OizYP~yDV7608mvv25^btI0-4WYZU0;x{a2%o$2SCnm%vz)M8Q(4Gb+(u^6SI zbm%{T_8*4Z5_eV=ooc{Y)?GZbHb3%N{~RG}h(U>e(#WN`!5PVrJ>Q`qP{M$l1g@kg zvcs70#1g%@&+!cwb?}iccH37)c*Fl><-MZtisH$>=9u5yognGmnRB%>)WNP)atdRd7n=<+uX>h( zbVJ*IgH|@=4phjMB*pPh`_MDxioC4lN}R|SAe>GUTj}(~Pu)_N216u7P*?;5I3YN9 zA^YCTySWv8PKf8j=1WzAjX~fAtsD8Ww7Qh;l6F8H=%?W1)cr&f0Xt85`O4wM#5 zycJSX;&}n6y_jbU`5m<}>$v26bA=eGY%aiJ0_KZ{CyPW?PSqHs`Fqa)lCm$+E8QP!?_yJP+FN`-t#5ArE>ay>3enDtMwt_SoNkO%#34S=kgzTxyRygLj2arjNlW_Cp^F2Z%%+S+XDvRmME z9*nbw35+n&LRFdEc}F3uS1ll?9$ed@n3@X=m zRNvy5NEL=&Qf@!}MF&iz0EXr8q-{rbdA?E*sfn;>(J1OpjFgX9nZL~T;;WpW4f|Hb zp1>2&j9*MZnY`N$6r$0Q5T8ikVJcCWZEqCydcSf?el~skU`W=2C$(Z{NZR>n^hN33 zSAf(nFwlREg9)0bT-itxHaRYVPmq!g1Au(r5vmD-^zA(_Q*0^oe2u>M(<^wI9^xrF zx$LyP`L>tcfrTt)wPAE;1>G~(Mvqay6nP@))EPk0I7wHZN*YNV;vmTdja*g@Sd}TM zb#_rUSRwZ^(DQ#fu1eSmJt}j7?{ROwm^k{K=>B`a0e##$ZP;~s^s!4njT!;ibzGU3 zPXpRX3|J51#=aa&RhTw1se^%LMyNpXO5V@HSuf>@*XBM32@xLFgIZLzmmBG+mO(>; zFrJaY6(fV`$xZ?M&LBGUm=xB=YMjTpYQGv9>~Eri?G{)+RP$Qc16%UZz<=#HXnsDc z)}+5%!h8~72AhqA4|s^INMX-7@={v3?Mcvk?xT3g2y1g61X&N`=9hyb|A!V-nf`Nu zPk>vQfO!I`qkQlTWmfV=E14wLv-jJ*AjNf?Gq?m8{gZc-4sh(MI6m{+B?a8ZsO_1m zDT7Sjk0d{f@evzX0P48gXG%V6!)i?VZS1ns6~J%kS#~El@yybuSgQKX!lSS7r4G#l 
zzj0h+^p66u@yt2p0ge1QI5@{SKSSlCj&SSmK;DTYQeRiQbr`iGXUSsts2d+~DBYvF zW@#I5?3X5Qu#iDD zMtT8*kuz|8zVO*I9`tn;B5$R3+VR<;y%QvG4l$AHNCWXHGuC#8VnH2$2jGc9zt%om zaQXiXE(6sf`Plyk3p=;NUu3bK_|lavAU>q~2LKWzX9X74mw+onl}%_;S#%fbYd;S1wy0ChLAo~2HN?)p#pDDJbV z)WrUS-69r+3+y8BDNO$M8;>``+09Sr{XNk$z2Z+FC{O~~1UH!+7JafhNH#`_VIC<= zqN>W_0BPaE7$w!aUQNoaJ@k@~f_tJXM>~Alo{rOxqk#YuMob*gr$gkZ!{?IU3;QZw|hCid_%nomLultG9Tw9pIUhoX6~v`vcId-NWr26D*WBOM=kBYdf97v z4`FeKnb5ch6mo|FQX9yEZI+93x{H0SukLf&;R(tiBV-9T&rA9~!Vdwk#RR|OEUZQ= znudCV{HH>4rz7GpMEBmO5l5XKFJ$Y20y4(}p9Okns@mzNT^)!>= z67Fc}M9q^aU4wHK9ItH?<_SoSv&W4!)j6QX9#&-y1)kcrq2DxOk8yQ^UAJSuBa1nh zlx=3-VOn(`For3^@OlF>-<1F-a|3X@De&V3V3ier)A!b1$;Ej-^=$oK$?*?>781yU z{)@*YE~&R0owlO7?D3t~*lO(CCS9{7A|6DY3gqjzIee;~9;CeSu%6ssAzZKFxr)Kp zX`xIwrs4U{@yU3YPOIf7FoFBM-H)xL^Gig4ApOTO9f*CeNrB1Q{V_jWhCHA2uipx{#ttkVZ3gs- zi#2HlYOc|jj3+!r0(!w7WvPTMTxHSOG1$ofjx29Dn0}{{o*mW&lst92F3_6l_wwmc zt)Sb@4o6ES$c?+T-IS!vtbYBDg=)g9ietBD!Zq+iS=6^V{i%CLWUIFXw>?D=R~e2 zN7xjGqhj1;Ajm(~;ifZe^{Avi7Uf~zieP_at|2dKyOKnj&NGWWTmu&lGw zap%bt$XPEp4rc8%E9D|6v<`?uNiRp`k3h73u$S)1Bupwsycbl%Bn$WXs5&PV9T z^)9ej`^wrVbgY}Nd6SuI=;=8fq4`-5{eqE}1``sV_YZ2E(@~rh|4zs2QGIqt@+&hz z80SW~van{MC3X*3Bw7MEN9dzkYug|c&uk2`&ivNjBd2~4<`knnj^nlzW9^U=Xg*y4>MGRnoilp3K!FdJ8LY$dd{Wm83284c@?LnZXTv$q^C(}J%41` z;Zd?<0)`Ou#a4Vs5CwT*ll_$T7EWNCBsZ0t>1)I%-0pd&#bUq`tQ2`1;a;t51(*jb z0~p4cuQn3B-Mbnu7?0Q;cRvpwd@VP+?yqs7FIlhaD1ioE{=Lt~qg*Mq#>yy0cVIE( zYjk@$X2zwf-0?>3iTGytlcp2TgWV1ZYZ4UY@<9T*gh4X*#n8cIsIW#{{c!Bi4^hfh z-uT045=Eav7(pZ?tRNsTx3J!HdwA~9?)|tO*z}q$#1Kcdw~@BLvCYNh)3T#514Vnv zGK3Ky2s}Qom$=v-wh-NBcP)0Nb#Hrx#(&}ShWkWwL){FKrAV(Nr0aF5o()<<54LA= zDlwS&BPQj&9b5`vhXv-(KCEdqY??_e!m)X$YL<@a9DCvO~!cKofu4XG8<|v;-tSaTE zSVJ^7NN-`7bR5*Ov>|@PV^&59Ay)gT73E<5klV6J`Z*>NHp|oMvdj?^2H?QQBUOvbA?teL_{{gugwKUw_1WvyJ&^Q04*lV7S?OguE?Njnk986ht zj<`(oM|WKx1Am%sG(~x+V zs?N|Nel)>}XDTuZwwAEf4C8_5BsLNRO)#f_O}}YzK;Tc2?BLf7Sk{nz)z9Mn*yr=^ zl#J4!o9#i-DJ_2BX=&&OI{wuopBkN;KORFEN$eW$gPVLEh0>e#k~#ZUoQ-XNAFzFo zOQN_jmnXg!lf}U1pcR$3maiCHwOC*#QTJPp$LBnvqH^khoLHP}Gp}5IFENy${+9}W zZx4)I30LNG)zs+33j)lxc+$v4R*Nrs(dj?-f&jnq@nmi$sG9)KFaN^-hqCt!Yod$V zMwQ+n^d6)L2+|d4i4>)XfFMN>0@4)(r9&V{N9iaXB7y?arPt7lAVs80?>(Ug(oTHd z_xrAMoqy-wCX+CG*36#0*1hg^ui3|AGrY?8R3q>r%wB82Tzg@?D&Hdc`ArW z5CMD?ecMY4cQb3EQ}K^{9v?z16iB3!z{guCqaNTkSs18ijr9wmkw+u(YZJhA-}tt$ zOJS>9&^&u)P%crMas%1s{Uu<Ya2V_nESn^W+Lz&Tlbb}#ONnaDsOb=r+dw? z3r&cr^btOjcHpFqW&hil=eJ|g8A-CWQD<&joWKCwQf|kwzL2?I@eSG&;j4x={rkw? 
z_T=EE&Irj2)1SMY2l@H=$SJE&mzeNATEeG-W+9Sr{2x0eZ{{98s)_=EBmBGp_4n?C z;7@P&XO_$8LgXtUqQ>p5d@J+bUoKHy(V<5~-mTOOG&F5`rC?TEr%Ze2B(#=3!ycJQ zUS4pm{?$anY0al#G3czdkuW{_g*!Z2teBOshIR{H>3!N2=t%4GiiX3#&aj7QFs9IC@5e13{2>Gu=`8%)tUY^71C=YhqX@u>$8%iJRL;} zHFev_Z@E5Gg&rs3Mr+ToI<@hGF)5A|1kJyQi2 zjM)h>sAEi`HhxGN@xobvjzI&tG_240r$%6vflSXt`#L&;kL?O?uT5H8W0=mUAxbV+ zJK7!`^&}Dq-H6yl;C~z;kk|ASk zwoA=0Bb@DIy8Q%Su$36S*T0eMTi&-9&A_>B8~XL5ZOxl~qKGvg-KI^3{ES^%w^nmV z4)JDZ1ICB9z%{j>FQjXpqCdVzi`qWy@&1~&_=3*#<9RXsUvbY6QMo-ZbddYA0q*yS9By zw4;Wn9}zfZ2<)jeJ$nu|=emGDLYL@&($gOzlKcg0h8ZLqG>3LG?$r*3OE1q{OMk53 z6+RbnR5HK))Q?jI<`X3zDgQ<_ zh(HXdiQRsPz^B500XtFOnI(2KT}O~@(9pFE8{m{kMYXA6Yat!0sc!Rg@4hr6#l&wV zRtNs5@m+sk$8G2R`doou;luwiEf(WqZvD$R0$H^m$?qtKJRW5?vdyF+Rw>=yN-RmP z@_`g=n5Pr#8l;md4vo=wO@E)w{}Eng+?SnTqogRiZFYjLD+wWPAsG{9++f%LOT3qk zfUVBlIQ#oe;!iyWRFDTD{L67zD=#P1Twk?SC7)}b8v7=2r_MRR;U_Qx(Q~FxEJvOs`Zw zg6}mEUoZq0v?newF=i7?xFkyWbBkhGL_cPQWEl^o&A!+&xdwi;mJ1$Rs#I2HFDBMA zZYCG?9cRD65yiF10)3492grgGx04T?PL;?)441|H+@r_0?#k#3t+aJi1C9C|13VR= z%g#7rTC0zksU^9H*{&H4m^v9{Ylkt}w=ykVgsdf*t9;!hg1_4_wo-R@%-59A2IY+b z&Q{y(Vhu^Ir_fhJ57yb`9@~I?wKD2(KPFW}+=;aY!<&A@L~repwv-_8Ti#eF2qKRt z1?V=n|NHT#cD;Zo)ak$9QG*R=;HB*N4?#hH5+Y`SW=y;vx_`PfT<;Yo)>0g}T zBlMde$}lBwEg}QV>fl)AW=EX5Q$s`o1G{GC|EX-a_OG&G=>JwWwA9ge*<5Ey^l;W# z9;Q^JB~hmHww%xEZNSesp{AE4iO+Xcke0&5e~GUUBPMaF!mpXwgqT&dEAi6r!VUQV zuY*JQWTw=u-Ma;($=*Lka)for96zVbIAc~(M)LA2$Lv#n@DWftCGrM%dY6!#zhY*} zh`dMxV$+T6-zQu~zx)aJx#R^`y(O)cTk68liB>F3heYp5R?KgenD+vYn{Vc)3!jDxGFeiIeP#tb@xmYF%nxC{;`BoOK#Q$G zqqmp*mX2^5*u0>T5{v!uPo1MNTv>xQvh_0s65&TXQ$7C;g`frEU+dS_5I&iwN4ae; z+^V(BIUuqB4dAh8NN@m|-gK*8(W#&G5f$VR(xHPEnkoqq9{2fSEjnFSFSrokBBZ`P z!5#Ox2$fTqF5LcP1d283^jfU@!XiSlRNu-7ZG{z`m&jCu%eA2R>K^{wm)L@Bz)FOX zoEqO#8!8Fsh|Ni21MvvO@V1*5g-m%2g2F8I5?zUHib2}dz5KbmTfh~b$KsQnaAL8I zH_06x> z89SdvM3-&XKL#q`a9oB%_|?Ismskx0ALR{-HXS| zCdmHkmYtL6V-Kp0m!=1-(7xAVnWK8vd+*RCrfRSYIr$W>bQAi}4T^}^c`{-E)tBng z*?V50E7hLzXCE?{8J-AtP^oKu={^0CNQEFl#5 zpy~VAAdb5d`iaK!&>HT4gSSqrKyhmZtEE@hv$qtYG_xtaZ2bu-PW=*%>-+MAj zE4tTvPjA00h|P$f{ITdX$}Ue09etNEe~Ylv6}_x8D|b1Yk&-dG^if7l@1O(hdrHN{ zS?{rAVD9KGbE5be6)KV_6=|*F*O{jl>-1~tD=rsT$PIB|jRV=4j(aJMxz=XcEcecom^mf?U5QA=6g#@5N9L;pd<*k8?O5cm(#o%oULXx~Zx!(Zx|jeUAzAc) zCjgyuiPo&e!6%#fYK1k)nA^j)*){}X#_okFfP7YEl81`zSKiF1nxQSMJ>mdfo@)6l zT0RlLm3~TiiYa8OgX&jLPcUI2jrqJyQ&X$LxbG4ZZQO76lR8O~8#1JJ;O6 z@P>Im`xh<}sJE5~EhF952)QtX+} z7nDQ_)sow6(!({n&S|x4ChsaAMKB_NO^ezsMXh|KebN7$0r_C(`NM<{6L_sM4&>N_ z2deos24l2ge3j4~w-1-C+YY!VD~oWl#^fTKHeBwF-KgnVX+*Ds-J-5C3=f}n1R%z z7o4$<4+S}ZGsDa@Cxu;ME@tKr?(W(7ZnL(k8`OMBosO#_EeVt&_0ZLN(Z!c!PQ z5L(21>rN+edaqq>>TnbDmVlLcHfCSBdAP7ut(#5(fE2DFU&lW9ojhPT7Ggk`y)GjW zr#eMtAJ593?=yo9SM?sT%QLbNEoSTFUC~CPx;XMPX=I=IkRKrsgSXm`a+Can2}ZSJ z`gTK>*P0`|XI|E&;7rnqmha^NYRXlQ={FfW>%Bltni5JLMR?Q;LiNmUs|l(y|F$bF z{opapOTAMy31|;dYeNkRX+#KmbKIz&MD-{N7j~P$OVIrn99{LT_1-ju)W_k%&!kdi zy_j<&rMpsbaWdK@dAY}mvEHc;9EBj}{o=JX$Hp%jx^ple|GFMM^7d`a=GZOw*T<+c z+}f6UY}v_N=QwHPX+SULgmJHGGB%LVrR-M6NI7IBif zZ{JlnjSr7EEF~REyImlAtCm?FR9yAw8$gp&sPIXcw^^g8bApVPuoK~&(<*g)%OkP+ zjWGZgju80pK<Odie@xVo z4p1Y3x+j^t`8+^NYHLZv|LJAfUF~VsMbdO-P`%K-TLV&OC$`+C8VV2gSBoS&(JebU zb9mC@MO=T=tdK}2OOqAMsPH+lSKtl4`er$_LkyD{4e9Tam*9BQpJ(Kfogk|%y@=6D zsbaHot`PkpGiIc{?*II)hC%WKYRHSQER{V(A8$R20|yRsa>@sm?dr>ca0*kZv?#>& z8AJdtU_s==d}r*!gj!FdZxkXN zp#AG4X}0ikFA+BMHekHo-O4qtaWe~%O%Y+trr~gC7~WF6KQ_QC*#AybhE7i{)QG-nHF3ASY=2XU6oppX8zeX4;^l~w;?42PUM|h!|fRzvZa9*tEg9u=|e#BAN zL$q<1Xx1v7-TO7TTPWWL_~)Jh$|-u=h(@s-8RwoRgE+4*y#1dUbbU&uuY{jKdIo(K zUfmbkAy)#HP1aahUHK8~HdNSe@*;yx|a=JUc zgtz^54Dx-AFBB#5Z|WT!l$PTHD+%hllI_LB)A~RGS-ODU=u1~Gbusxa^U4WvKf%np 
zFy(P)rf#fvi*UwSoxf5E%OoHqVzgqi7t;0?@cga#NlfuJ?&7_?@S7zttEfUMwX?Y@ zjk_JzmBIA(6R)%s=9N--bD#0td%e{UP@-^Gj5+fM>6atM#)8l@lW7Y*t2Y#D&I-d3 zQJM5=sVU*odn1KZ{}%SDb42AMDCz=+yy=!XK~=w17q!;%P0l!H;G87D62b^BWI2>< zP1@daP}zCU>6&Pt7%THI&Z0;u!$jSmQpf%}1k9=8>zSZ%&F%Rer5*GO!djHWqu-t; zo#IPOz_>WdiEoe7M|X2h zM#6tt)ej^+a&Zu|uoi%CJwUX?aIrPosdMpYHHxep2wa)oySYN1-+Cj$oP0B0EK;dv zDN~q z^JtL4{Y0v>EN*l2;ftkWmDK-pDG$)~eQE2fsW-mly^hh5YL_UYY^Orq7J4Pjwe+Ot zI*=alj5CXx*r1h}E7qDWHoRyo8R@93nmM9tC;UmNI?nyi+H=YG7EbH33HB>KwIK%j zEIY&0>;JtWKv7Q3r$4S#wYByy;JUl*FfmDmRF~O;h#_*bT$)r_&hKH^dHpto`$9qh zYPf_G^1l3{e$)or`rP+tERHk<3Y}vANwwsK8jg*J7#lNV4ejn)FEq6ipJ@8(vqj%Y zq`bUbp|;ixr+EG1dO6i%x_}^=G3WyRBQX3TdL#u>w;DwHRjB zwH<#0P^l8M_2E4pvU;_LR>W62vUg7M+1a%-4RaLE+5IaOpHRCe7oCxXGPLaCcAu(<78`n|%w@0;Z`7TB=fUqho{3bb3StZ6u_H4iuKP$SWXX@r z{G0qkv46MtUi)#(So>+Sg$ZA0PcFS2H_~iIY!9S=yHYtDs&1a6RwOmQQdFQh`aheSds=LUiGL}(JlBkJ5iUE^i8whg6*wv(gaWl29 zEL=Rk2Xnn~WY|l0oQp(&I+bRGj)9upDwz_TO@XHRs%8}QsE>i`r+6bfD{BW3kEDE+ z(1kdtpLcow<_)pJ_Rw+dTYOarF6whx^x58&`gqnI@6iX5k+oCbCrfQ7znfOQ-Jr3k zexW1@;s5A3WV`eGYn8(wvqh~xV0~lD#A594-{%9tgkb-pwhP;pF7hhdPK^vO3^bgr z!m;@xQ=ZakG>^WYj?E{eh9>-lae@}&_)n;ex*zy_Ot!18_n%Z>DWLg;n%9mZhHIjtUKd%lRV z75)3sSc{*>_ChJg>HaK>%{Gdp71>3L#GWkV2VJ}#E4MOw`*s_)-0XWmGwKt}kazH# zT-k4zE?3zn%wSZNluyMsq6i*BlNY!vE(fZE9qalt53Lz?tR;dO64lN*YhO(>%z>`N zYF+0ke*Cc1G~1ILcsWU9S?fl9+p;#4{g+jTP@*acqZ*b;^>j>=*wN+J=VvwF#%f*V z{g?5Wm3GWwc+gQB$6?b2zsuW;g|WgnEG~ZbzwntR<>t^cLt*D3S#fx@lC@eFYEC?~vlJYR9bz|j~MFN%JJ1&5J(PRKX zbb&m!?22_<(y(cWtml1b-=$H0uyzx15hwY^7*oO{A7#5GeXiI1}0vvnWj z{$4~v4AD>ylgH+jCwodh2*REJRvMr^;7r#C`OM|p+YLan1N8Awse!0&gojPAu?dvs z*Pr(yrB0$FY_a-eRRSM&BARzudMX8Cdv$&G&G(8g!?etTbrfpuknQ{z)pa;W#URLG zrIn^0fdiP)?*^jOs6N+f!JR)?O!Vp~z|OF0-uUMYA$d|cc`e=xPE8;2e!`2m+Lrx# z)`sp$|8>b84$+(ereqSRrXaR)d23_iM=NknIyNc)TMzZp^vX+BV_m3%R+6^Xw$Z;I z6sDl_-Nj!=m!fncU~{X^3IhUbEfaw6C>?s_<2NeL*UpyRiYg$ zY`1us0NdCIguqt7ll*9!I+P_}C8q%uJ;l1ZU1G11&eU_awJUzCuDwlC-Zfx(Zk{)^ znxOcw6f@K8Yxx}I+%^k|dYz@>^En*hHPMnZQ9|x!f&W%R2vLJ4312*(zLSK8!0PFK zz7HJV!W_oT=kKUZYjj6tuoT}2?ninKE#c1P@^-Em*n6_0>nqs~-q7 zFx+0RQKgM_@dpt#KKyYyq631*a-i4XcrqAv4H)Y}zyU)T2^TN|C5N>)0DDBhOJmO) z5RtUftxQtrU*yPA+7Nog<(bFwe~1C`KSZ3s0zezD(3hjI>tM{z=yDMlBaS61*T4HG zavTLDZ1<~Q0bkt1MkIK)1l{q+2;bQaMC* zQsjXLj0caKComAsJqQ#_S5pnk5W*Q`g@8j4C<({X1QG()kU&?nm`m-bN=cR`Iu9O% zn{d~D0-{2CZ9_izxOHI7sEu^3H@9E-D5+u8>A0aF1&80g`ZToviOI9dE9LgXX#U}0+ z&vBOO)|M*z?AWg8pX0F>ieRP(q)TW<+%QGssTVmBu!_c4#54ONA8V4)kIl15T8*d* z%E`%Hs=jJ<`OOCN^sn$UtE6au7LTMKN0Pgii*mg+GZX+}lACcvDBHh_38uR&{o(2I zUO3A;m`J44%VJkaU77z}L=_=@-RmIk#jiI6$jrE3+&M7WQR^Zl!>3kf=JT))#)=|_@=$sS(ay$|1zRI{Le(lx)^j!0 zG3V)4p(^t!@7A87dH1SP_h;+UpwByMie06_DqXv+sF&{Thdo4)LcGaqLO&MKxHLM1s z$W#HWbYK6hlrX3V(By^ey83^!@BgeynyaivI_kTlFS-Wb`^|Ru^-KrzywDDF7#Z6Qx(>< zds7ucm2vc)H4=-p;$pp30Vhr~KLTjoE|*Es$Zp2vf^w^s0(WBX3io+iO#Ok;4|~eU z)S0FlGT)RS;+Hr z`^i1ueNJ8i_8t^}xrOYFWl_HrL<4`OA~({DmWND=mP;!QIu#)>`lXLB7VPeykW4ew z1)Qh^q(9#LY?2$I30=Uba|_`2z&mp?@~L>dR%c;{=9thI{odngY6o?G4&ZVVNpV~3 z+iNi<`&xP|>g6%sqIR==ryc1l0+wvM6Pv6`*2EKa)i8zXr`AR_y(~@mQ<+OwW5TYROO7toZ zrK=;tx||<01nAU|-_bfY_tvu(`fx{%GZrEkrzAeSS#5!P5hsu|drvt2nV=o}H_Aa5 z`VTB)MEnHEh-H)T|M5~m>nRfdNW8(j^Fmr$dI`5k9XoNs7|A7=qw>R);R~x%%z(_N zPi$)sAD2jm#IH+sIom;hV-W*tuZZ1-B#2(vBvJHf5tdUbc&Deb`e~@NnxJvSk$XU& znUx^)q0i}>>TKXO><#G}ZC(x6np3qy4VXt1ZO=u&dh7TJY!q*1NI z7q1swLw+M_(W<1vPev<0Y%)Z{aifWJ#*ZT^xX#}?f5uX0QFbW8E21!2e?L#LTXeGN zo)uu&2T{XJomWHkBjF8wrx8H>a?A66+FE16499UP3slgJsN z&!zXUA-%ydY6qzootmXc+@jC?9nX|FY3wDm@BFF;j*USU#U0(A#tgyn(=9Noi}B{2 zT~Lqw^j>G(&;;WnW3MAhFFI%$3^!;zV>J{1KRdThFQ(|x7gXX0UI43HtVqH`(op|) 
zNe|M}>6S+Rc%kk{r?`&i3@?Url)M~@Zz?bdJiE1sNWVH=33g;n&~9a%D0-}j>}`%< zV--Oyfpux%;C~+Not>=Q1L*Y51?1tklZ%@hG0Xe_iYNQelb<@}d+7^46P7Z2AX=E* z;beZN=x7=AmnvURtvqDHU9r2YHS-Gs7fthaiNtTZuj z28=R`zampkP`cm^e=6Dq5gLI9{3sDVtA3t?S6TB$j^Rf`WyJJ3&s%vvti<;427}TX z`p@omu?JgbF{a~8dv`e2&LmdnFy_KwMFXRhLy`=c*6ioBN8NqXN2FV_9x)pCYWIR; zrzMKYpSNIY%HsmA@V#?{3j`u;bSQ1m`PZQ~r{&DqpZrfzPu7OQCFr2fNdAu2Cwv`+ z4gKvq1#!MW$Y`vd;xkuw&Wd%u$HGoIH)D(n!8Wp@J-~oN3fu{gTA+gB{VF4a;bJ4b zd~c+D)5d1DPGS?=n3ooO>9acNOnq=mkGcngWjcbu>9m`ma#YAmyDcIx)rX4e2_XC* zlmCyIt$mOOOF;9K>w-)CI<${17Sp$4?tigh`n9h|_BQj8RLulZDt5SH-=SsPu!(eL z$A5nhTq`ZqGO^dWW>^;~s-kv%dcl*guxR@01o~|En+YKa6}}d=g!#nrwX?u>{??SX zZpx%hX){bAV;Sc=5^$3h=87^dhV`;(?IV=79xL7CM}K+hM7TOhj0CUYc8)I%vt(s= z(EfaBOOLq)ds=La22=&c(4j>us9S9}=ul7<)mtfK!317Zv5p`VrTHpb8HnfjC?qJX zkjgk*AXsuUWah2DY8yYBZVDMKfUS*S1IZJ)b}jUpu9o`K z3jaX0TPgRT1f$*0dKVo(C_ndD4k#|NE=gJoVjY%M(r$|ncKYpU2x6%q9^&d31R_-3 zW&z5DKx@3I`#Dy-k9V-E*|k`M`fy*_ihm8zrw-{{Wbu7<3ApZp$46h{Y#K*d+CT9@ig|)8)F24oqI0Z z+mi=RwkL41B|){0_RXTzmHgH<4v;A!zpmuA%Rhei8oyAI@1kJngfywU=hAuc#UF5q zJl}F{H#bza@@U&y>P-7G6heSmH|$5#{6wZcw;O6+%HC%EQ&hTw9C@B`Cm>X~BAQ{F z48pg0BToU^r1*?xIa{;zU(h;)xx`GS|HPG|$c+r(^4ZaFa69ETUv)zmfx8TrEQv@f~2wfGKYkUg%;B;*I)jfU2>D}`r@;bI@JmLvgQwb zr`>3Dh1K|e>WTLNHoK~p*m|1RQywjMCxTMe4qDC+3Y#(JDOp>U!d(q0+3R)wqR1oZ zqZ@3OaMMjJT(`YtBv5{(V_noAUz6qpqvYL07 z(WVH~$IQ!`^*~zl+o92xpn-V6S(xmO%vJf5H=ReapGCnt5DuDpjw+P5;U=iJ`O_@H zM-dwyCmMg`&dUPB<@p#j*65taIzKVNe&1h`5$@@L*L!}4;ys1~)ppPpQDjGJ5zB2o zimYk^1J|BZ<_tKu5%+K_=+gh1Mhjii0gmMd{AEg}fr^yOB2vDud>}1vC%RJ1Ky1qL z5$EY|332gcWgNE@+gig<eozz$M1t*CaCkXyeUiW!8)|)$^j!% zML$aY3#;BCUOWIj59A8eDwAFGF~4+ zK8W8L+nBIinZGSO4h!D5_C*)(s5dXjiROz1cnkUZl+Kj}t_I2t~KdBN`L$ z!-gKy&qDJUSG(^V4PbycOY?;8cp{j}!O==22K3SG?DjUmVYAvNpY1jw8z=ouJZ4CT zDem?x$bEhQ`m#W~$8HCZoO2;Pdqk)f(jLqRRig2hCOd^Pt)wEG&UP0}L`wRY^gfQ- z0XH<6j$-lnU;+j#AQ-SWRetL)^DpN`)|l8ljw&Fs$YW&C>4AC_ z{b`IvgI7GS^i=5x-UD%BW?A_pRYjs_`Xced+8TSN3VUdJ0eOZMKUs8wKSpB5!@K(` z4^Jv3K?$7~)&1y_zt{4+WWLf0X^ru}^w<28NA!~6{@7GVm7g84`KhQbx?~aNgOZ&V z!?z_F67D{=e)-;G^!s--(#rnFZkbt}7N)@U6rH?LK(Z`h_o*MA2$=i?zi2f~4%X zQ^ziDZysu)?{smbxe>F*Wrs2HU!u_88o8~Ag5UUO(H_xGdUxB)V~};xw6i3pX~h=+N@1n*J7#eSCid!onT)jVeHB7>8gjEhdp zTm3YjZ(NiW!*<|b`PKN4uphs73)L^Ao^A39kD%7UjN(rd2Ft@Gm`}7QPB2|qHmyys zwa(j$RjIq%gEywK6x}}y!xSrW^eJM}A#R3!jPDYICA|90Vx-T(zG5wmPu`~;W0r9Z zf0ObLN#iB{`d7W4&3-$QT@X}tCz69kdVb2#{vtb#LX(mxBD6OOCM?(?yMf=atQL#e zvK9vqy8FZ6CyQacX*|MctRdghR{ITIe}XrWCd~zZtL4Ja@Wu9v6Y8>{zgrs^+(XCy zHA6^a<1Ko-;GpjWH)t3_7kqX4j)->46hsb-K6&YHOf^ci^6niu%M*zIN@OjA5EpxL zdX&%5n+4c|)wmdBYFHux78;#KREI+LOC2$Gvu}6`W+ZpTL7fVJgrwvGhF@>~V?dRy8g?NyFu2-5>toZF=g=l9Uo&3-q#ujh>b@mHSGpqpWNc`AHYj0YTX zToknC!{PB?_dmABFO_@vx&F0idBt~Rk6}T-@@4NV`^zQcaw;L?@YmL&>LtkSbKww8 zWzf>=vFb;Z{_RI6L@ev7c9HF@GSoR76WqFHKPHahqUkSOlePdc(&)Ya_~so8Mj3ST z2S4VV)Bn|Zwl}bS3;g0Y(w1>2X`)o3j<$iLql#Kn9B^K6FyDpt>x%>t;yaZu_fA~& zmpXCFzmzR4gv4Tq*Wp-Vr&G1lJE7U zZslA9=~73NH$g4(eA69{b ze7=rmg#2^u;}}k94bzMle`A@o%dbj59C1(%xaCFubG8VgsRl}4uPwPeMEmwy6I-)$ z`d&8#r+xf*uWfVkWgj*8Ij=y?GhHa%uLv6e=uIEb>b1;YdaM1+&aF2Xl4j$O2k%>K zh*YfG|9i+X0aoR}&N|vo3$jEr@y>!`Q`aKps1>3hw>Wwlf|cQ^s~&M?u`tBy#Se7Mf6Gg0PQ;d}91!`(bz?Yg#_+!6*H1=?sq65di>68W%Ew(KrBO(+1d-p;cShgQP^j=X_>x=@t&?Is^!9 zBPRTZ0%{g_rP~C=rb+vkFe}fMDClp3pig9;v-1oMc+)x7`5Y*0UD9F;=5KaQEqh7j zd2#U0fgZJ!mOu#^f>T#{sHl7QooaGKOQ%`|^qM+9zgg@1J#((#pJ1kQlXrA;84V$1 z1vJ?fe?3rTjNtBX6{Vr2U5Vm(9SZ1vHh)!~PX(M-{@qN}~Q(i<+JQgT#TU(K6>MQ-Y|#wWIY zmOx9-w?Q^O6c9==u~6;YyY>>AJ3QP&dBp<`jO=M^I3e6 zRL;xg{=Jjwi%QVNV;RRzsrzf1)^f_lM*1%P#nDmJzf?4i45xS9=;Y0ethejJ*7hP# za~aSsNiowGOKe*l@w0I!M?mv`58p_tDdng(r(n(*bKE~`y%Z18r6w}DKJHhwD7*?_qz;7{V7M`M%=TbjS_= 
zDC1)TNdw4$+O~$Wp|&aUyupC>8=NWB1W}VwK(aox2q=inS}h1FMC%`btX57nf1!Nd zT&;#CKBU4fyx0p~J?*^?-xdE0o)m&sF0vO9XD}&T@(T^&6@Qm2;WZ#oji?gkmCTeL zJSb6J`C;QQ0Bq%#h}eSvO0l8A)Ju699DGov<9x5nv}>s53*I3ndqy{uB~f=Oc?Q2c z7|kX?2<)x9h6p{Nj!u7L&D$2|@O5wMkA@ z`fIM%7?UY_hEqkkF$eYx<|=8ej^-*S9gg=AYS)9gY)bqZwA;}4wmQzq-N4u;#g+>S zG`7RIi|AKfuCN(d{$Z&feodxgzTPpod@==wJu@7~i0)B$5lP3K@-m(U*8;_efTu+} zV}@4X{JFd2I|ZQ0&u)oWCHJ{v85{b_R_4v&JbDok5rNGJ+ zTv)BR1eI?+8Bv%4#4WVFQY^EqqshACesh`|^n$G1@FMp!uAuPvvfv*CA#U2Mt=W@= zJ&a?WYQ5or=H=glDC*0|adjV5r5?REG809MD;RPKRh9fQ?6k*htdI%=+HEIEgw_)~ zV8K^pyV}O8mj=y-ifu#uT$H&5O56m^-qhYG)5Tq z&zsL+m;yJIeb<>w1af(4ahqZ7(lApov&-rR(Z3D!EXGYnCZ}o-Zh>7lwPL~Xaat$> zwin+E#ht+N2{&98T}#ye%-aC`mVT*`({PGc28rn~h6!#4gf3D=afD^8ohWzQo!+yd zNiYu^6X61;dadEB`74jW?eP?&~%K#Hj->Hl&jW*HT<931PWbZiW^ z^LkRZ!kC;yM}3Sf7!!pw4r2oRZ&{zUi2jXC$j$0++I4{bNBV?q9QS&buxE)+VLU!m z{QTvZKhw%vaMJ)ntaTAHbGaGRxLA+L(Bl-O`--2Cr>})beVe&@kFt(M#>1g>w7)(+ z%mb8kC=%nlJI~#M-H5E9>Bf)xQ24DpbY>>kErnE%8;QiR)BNE{sUTLK2P`inFUHmU zGo@#hXZ1<0D;n;WOG9LDhJ7+Q{1kE7;{O(?>mKeVtiK-Cvwg_ScO`k6L__IFx4b3zN3$a z1(;diE!C|FrIhu*B`hd*MSb$f;{4RZ5!8=ix)BT#3)EiyAp7gEB^i(F2j3c;e-~sa znI)gIpzR5!{l<8_3d2|Heg50#B_Rz;3kH6vj)~u!D~Wtp2__V%EJh^0wK5X>7T&p4 z3S0bEh{v5wcbgJhU-;@rIthkr&4OHh7gj_2Aw&|yM)m!b42!lnRX&R#BFOJK{(xi9 zxgy$|S^0NS{u{e{wXVc+uhP?t(btubhCCiY<^~vI=89?w!f5`L1mdsdHe1vxAdGV3 z66SnXM&+u0zb5raf3#Kn>HJ~0(t2)(?&&9Bg~L5=P}HmDFZhiVCYb#>4>Q7c{-xm5 z8>v8w){{jQ@2-%E_OquPh0umYNoaD%(J|C{SF;PUPe1f1Ba!4ySbJQloVdeybK9OB zuA*)X`7quMphb|bR;Ezb7CBoIbn@kY_58)g-`_FyH7#VDq0QOx2kt7 zQ6Lit>6bF>jBs7c{|CBeq%bHh7Pg8pAxwO=bqo#DZ+xpd<@?P@ObU=D`nhSl|Lvb! zUVOt3DjhBzpATCBPhN^Qd+hTl9J4i11b&U#0>7Cfi%qoq&WyU;8-@2-!ol2qbE*x@ zQS{_aNy`tZPw8|FrHk&SpuWu<`EM-3Oi~Cqd6ByyZdFgh;o_5h^0h?6lrCH<85-iR zDL)&|5`DyBMdV|4{$)is=!WFcP;bzQmMDjqW&4hOzZ2#3qUbl9cv-G7k)#S3)^&LD zN8H~)6|P!LH$e_g4@rM9t&k**^(29cx!MiabRrl7n)W%A9^H5L5xxFD zgsAbM2Nn{U5w^lq5ZA}?LX$nC!){dOPM>cG-a-H1TEPU4=wT^b{6~k4tabYuw67Er zayHR$*wyuqk~?%u_zPG*tlLrzs**gvq4T<+KUAO_Y;{}r+)tEox$qTttJ?X4ZyXWm zWR2~Ga=vn(#el7jX?TU;!hrgyu5o>#|4pG0@th<%Dd^eG%SnbQ?!OnVy34O21O}lU zrNPlwN&X~UwBMHTQf;jCcyRvfu7TG-_tcXJs_N%e>F~(D25VC7p<|JD1sMwrtnNrY z#i1{UfdsO@QsR3t*cRL@k-kG(_Vp-UP;Dfa$ls%A&%jI;No?RiI4u)tP`5R?57@#ZhtQeoT3TobTJs!Wj{J{zMFtAg+Lv(5PaD{?TLK-n8BvCu?LoQ@ z)mq+lNjzzfHs){F<-D!s|1G%c^#Qlb?+C6*HzCq`>85q#j5}WJc6J#zg#~&qy7+?* zo3=Xtl$gw1ITC4)Vds^PZsj4Qhynh?N#xfXFA!W9_@ts&mqqeVy+q~Ve%rhl4(j)@ zjo&;$ao!KtDA?WXr6NxQb+U@OKa!i+{vMvTTQ9U5D! z{WE)j@;-4;d`m>023y<*}ME@Nq!989` zLAH=FQDmqrt_ZSN#7|`WE<}}!e z*%>XVzIl!w*_1X`7(96>&eo~tXfRa=|Mmy5EWszxI%ODA_58XP=meC{;U2K7_HTtn zBo2O;ZlhBZoFbA9`mgZgcFuBI>$6M4jll#ac!1Uzr~5M=*(irG>T!K9J@S*>L+)kT zSfu^h2grIm$1N^&^SuQ?`=6)eaFGNJX^yn#GPCGrVUi^sGwXN4B#2wgD{wxvi+4Id zAWN7QtUp45o%d+fNcpDLq7W|4ST67dvlm`!d)&ClYP{3+nRlsy^_+DyS$OYA>PqBe zu!14;W+}(xP)HsDoRVZ+Z>x~Jr}Kq2to{%v9kx2@QKQsPV`q+Xb2&ru{ZTyrBo$}N zcq;7+)6dgd5|sg&P{dttBi06rQF&HF)}J$>?QMpl)PSvzf<-|#45e@TJA>mZoVDH< zu}kxQ4bY%EhOfv=hYM~mGI)Zn=$mXg-X=S=}_Me`e7C-6XoIA8v=XWBY0^Q8bO;@!DNT@Gq$eOGKuFH!zW>krJmY+M$2sGCgX|=GXYIAt zo@>tQ`mG#^C?G}L71s_4M=e2!THhYstwCSS4SnHD{&KkdC!Ji*Z(O0LiCU~HzXp0E z_c1M-kGL&S?juz?5j|5HH6GHbIi^-k20^r0>dSlFtMscF4I{6hFRXIZyD1@$!wtUV zjA@+x_1{4(l3G}Oca!~HJ=0P@WDzWN_m^y*D*9+^JbcTQwfGx#5Krd9-__zpzQ?O^ zHEO`XQVi$~yu%=-v6ck&;$R#{byj%`wC1nKL;j%J-r|zpLO&n`aSNM;m+t3|T0PC! 
z$h{3LD9n$y=8E<<>WczS)rEEv3xe&xd-%F?raN!G{z*w-#r!uldDzxa*BNeUIO65~ zE(&d59H6ma-|U+}26QayudR#RLj&e^&U_rk@Pz94JZ>A9PEG!B2jY}ZTHwtzfe9#T zIHh%WvMktu3UmyNv*&u}SITAl@`tpj#KP>)93a-d9i5w4Vo?>Mq-g3D#_N6^zREtp zhUUxviLb!7_uO$hdo#MAw^M~FPJbs<)`)wO(}{o3CYJ*n`n~AK?g!y?W6NHX+*13O z*Iu6iLRMvwD?`%npX08shZb=X5u9p>UF;ld{V7q#a@J9QG>&@a?hdtHB5r=Ku3V;% zg?4q%IRI*h^g1!9s5V$B@y7wvrC@7|{=R1!+@Ub*5m|$rd+kEAXfeGaTiWa$r9+SPEWlVfLqh3ZjfhqYio90D@`r*4k2=j{v zr#&LK+R5hHq=h|I-s+6fcA47-e4*NhT0AkUj2P_JL+JuL9!`*kF%k?T<%>}GsANgA z8*k7t7E>Yej51LZ+l9CFuCJLVMlHIlA z-AHZuvhlM=__xU8sz;Cjv}5SIOw_Q zuoo7*)*?1+DME+kvt4;hR|?aoPBE;FIQ#E8lMGEup5==Q41f{p$mLCSn|er+AK!#6 z{s0->FUTRQvdY8isFsOc9N6` zzWi2GK>(@n@yv$wxB^Rl=NxyAdt!I!5xf6xbcO-!N7NmMBFo>e9ha28d;|9BBR`^GqKq?Bq zt*0A);ShoNUT7$rB|1_PjNhIaReR>%TE@m{XWt)r>rcveN&@yoQ6)z^R#Z^eROf8k z;zBUJ+!DhNs-Qrut}M!!FbExX)GeYb9%g%)Q4{b(`F>2Ow9Xl7clZ!5%_GM)v$nwC zr036Uz1$w@RuB@Y_vkBTg>ad(n*8@*S3I$*2aqiIhVz8KZbNTI8IQ|1@wFeIYQb0=<-=8G>Pp!9CT)@{}ELZ{={iU#z&T`7QNIUb6 zI%2KK|0`!gdnBc<2>7bQ9+h%foa2r-WNp7Yfw$Uc>|ZZ6gUt_EHC6jXSk(OU(TWGA zAGQ3HE&0$H8nv$+3fa$(u$%G-D;Ex3@$$3#dtd6Ps`W7W64LHU^Jg5^y9f81D{c{n z^+7W`Jb39f5KH|!^scfn{_1#WRzT58yL&PkPrZ#l|0oqu7#*M22U&kz8GpLa=@#u_ z`L^nQSZ~c-OwMzlTcry_i!@l|xjOojRd_}9UnBL~VIjJ}EW&QnCeDS=qhjHNT+NC7 zK?q5zcy8(ma8^IC)%G?IX+wH3i1zRZ284TRLi8R9jB`Rd@ zur2vX48u`B{0SpSWbR2`H?T)r`?eUZM($(NXaoRKvD6@!X(ru=XKbs}@udIs_mQx1;t#eD%=1E$%P z-cDB)ib%YBsl-0R>xzKC?4jbW=z6Gqja4K8S(AbDpT0#Cv>s&*Ox?yu4UJ(4$ka^@ z8Go%(I-%^(!y-#8bbRwwUGz&urUi~VP4Q1UxxTcY-<*gGME_A^$B*P>khbPo^ky8n z!zO#PMs~KGuVOijLkbQtuTwiC{?xNlTiw%=D@&$NhOti)W!J;1iAt~Joz*{6Z`OgA zSH1K4dj(;Q??b2pHJ-D>&Vc;Gspz_(aepCf;L{_{c(sJ1)XRb7CtstiEYISkj)U;! zMGR^vwB5%8%@0{oaTftB%>#Z|suEo*w)de}pc+|R^JIT%${M`~PuNcKT`x&Y>(4hd zpDgW0K%jTLJcO@~$tU_MfyyV^H8gjM3VBcn4(xo<3|nB+DPv3iLLE35236Hr=9V0FpV%EhQI(-h||b2Qo%d?X-Jo5~6L6nBK47kRuQ0 z4jHX}R@y@zXjA~YEZx3J#mu%*@ax`GLzw$NAG3i%u`(H7qXg<173VKf{+>5ec8)Le zX+m*rdc;usXdbaPn!hjZf>RnC_f|7AGs0M$HR(a4q5Z_-I{DQ~QhohNwZN4*@**Kz4zz*Uz$pu`hWl{3Y%DUmearhhYXM`Xe+WfuiSvgV9hePbNJ?FRB%0_^ zmD*bmyb&(ns%@`h`E)mvwf%fQx1V7ZZv+|wORwr>NnZu6Cf+*2KKww*fPYBl(Z>*- zL;(5_WjwrGVEuMyN%inro^+03u#E8IoMPLn<=;NnHI!Zd}E@5 z+}oD@{3e^c-or`M?Q;xiQ}xW*!C_}hey1}30Rq$E*F6A<6IfVm;V~Pn8 zQWm*AYp^1+%KV^L!84S*{9YuHIc2<%;Ht8XAjKy_%Ge|f$bVp0^Aoe?{f=Y=OX;H^ zqrEp1Bx#2EFMF%OF#4PPMAnLE8w-k_Gfi(rx*t1dRN(kTk`iTOmLPS|$OnUKrhU<9)R$gIS7{}@-(3cM)A>bGB`!b3#c*8D&v!JUoKF` z#SZ4)T~FvEZ1i01 zp#`6(fsekcc|s3KOOR;iF(Km8_Wmx%W3zH%F6REiSelVa4~cER<4{l;i?~zdAe#m_ zUX;V{$J(t~r_*-H^(Dqcnp$u?NLQ)jNSDy(VaY93vtgj3T5mqfl^Ohj{*wRxPQYx> zk8wFw?#mQ|0X_y$?3l;X4?1VNSYBhl zkQ@1LN1@lsJNT}7 zuau>)&TS>RJgWl-|aty0S(#v?=3fk15g!N(KN6(xjqk*&n;%s{`2jj2&q+G;XcsHXF5%xCKZJn!R_;b%cdd0__V+(nbq9;tN{s?E$XC)VM9vR5h zihi-shvqlSHE!+09U_TjOnr_K@L!+~!`di+6~LmkzJu@f;`N2C(^?oSms?V08IydZ z-SFV6QtB6}z&0S)U6HD3U`7S4BLFPmS1BWL(`D?>TFwlIHEUc{xzWcr#y9Wlp0&=t z)*PdKlyoCEzT^j1b*v1QRgfi1gog5iA)%!s&(sAEv%+zk*xNM$e(H0wO`unhY4W?w zi!OnW&K%xnjzgjsY+Zn56bp!uoF!cMF3ZQgZr#w=(U4l^cgo&!)GyBIEaeOLxJ8%# zz|U=-R-A|@OmFQSn4qmGldfbMI22$d_g2kgJ!+Az%J``RzAY9JJe1)`H63vIo7zFz zXZ>3|siqsG!f8}-r!t$~pE3BSQ+wV1qqt=B7JL@V^BX}WFcRYSKw9Y(Ra{!O1 zc0_w?8w5s(x~Aw6-_bpB`yBwCm5;) z5=8cME8~-Y|D4vkD=NdC;Ag#oPz(yS*jq336fRbn_A^(QcWZLmr)84(Cmq-MdK27i z!ckteawWk#^Ng&!n7c$h28AM5-HcBDxSV2yTI4YjnV?9?0w^&B#e`kt&CKmuWrfpqOU_m_1K)d z_>H6whiLjcy`68*_BU4cfXA@K&tP5D2AuFDr{`Lbj38tepv+A}s5GCnPWz{Q}UO`|T9vPYLY-xl&?1%}WVfLKy(47Fv4`XnIie%H5^ zf>&ikJ9EG}s5pf5h0_~FN)aLA+~jluQgFW5)GxN0>1-kLJzacBN*up-lDzJ!ZI3gk z#}O#qb7JeBYOM}AusGnirt9BIQ(8aGBRq^W_4{!L`b0a|l2%%FA#dislI(=gi}o9C z?d$hC2y%o-wvxMK@0qyIHx||7wlC#%(alLUSLC_EnI5gDW9gT)=z7}kk+8++NQ#h= 
zs^+CdPA|0C7ul&|DL;Qv_unt7fa=r5sZhx|`c)0gcE|sZR6&$=0UMsmv(DXLE`#P_ za-H7(Gm=sLvxb`Or#45+{iuUw)O7{ryMzL47*re2Sw6!FI_jK1t}Nr21}qfMt64-e z&;wleAZL1g65>*5Zg~d+`Yd*(C$ABvzzaRb#)jh~n}kkb<;lLQ4h_~Upm+=o$3siM zs}02iYH9YYY0bAf(u^nkp(gjVW#hSLZm;ss7~5XQMl5KsI=+Ys53(IsoVl#v7iR!ta&_`3;Um)l#s0KN}DRQ=CfngK?_O?4q~u z_pHD!iia5^6dDc$SYy`hYk}~e^TP+bjF=Ei%oXVK-xI)|&JMMVLnI`k*ODMh838Ia zKlV%p*+cV_)nEIik#)nB_Fnx;TJ@7W*A4^&ThJ)m`rI<76PuIH4EA69i>z_9kpBTv zJQxrXv>1l@vjI_A!AbGU3{Mil(1%PO;3SsDP#xz>%=&N8<8im#|0Kq;=)c_9B-=G zE|+VJeFGiGQ)Xx1QTI$Bl?(KBxB;`nnDAER^O|Eg&C+&$6BoJvo&7sU)kCN9xY_=8 zlBcXrYUixpnA_b;QJwN+EoZwVQbJo3S8n_k>MZ2#glFN27Wt700huXDe&EXB~bF{}^!6h7{v z+p_AePPm|nETGdHT*+)b3J_Xc^zJ&4Uo2tlc#@i1;MWGw&tcklp*z%!nE5k}cERh~rfgP)8)=sxfW8ELgA-@ZZ?9 z<(NR7HbI9Mal_yc>liPW?JSh>%xUb*=Gy0iCx!lIwGZFh$g?zVGMnc=13-x#uoVD zsL~oys_|F)$?~*ruy^A9EZsFHpO!E33H&R^Bc!;Bc0m9zK|U8iSbDt***O_tZrQiq{%xPt>%lPH$aoZ zB6es?i}31G1GO=JOgzDzLA0e`j1Z7B8e?)YVT24xGrS^fHfq= zj4Ke9Mh$(boX{TXAy502n4b~%oNlr0-6|z3H2&goxMMIx41lX}l&gKrNa-Pz+6lKd_)4PEYjv>zDG{(WC5O z<(*8kX{z|6KNUvd)jKvyz572upRFgch!FJSNBHXnUy~Wf(1cp{-RV*ZR(m~BtAl4T zf>}Z*?7}@u6KqaNpWDQvWXTb{{UZo<|4AjDl-l6EnjnT%)&+4;V+Tr@dfXKsF!0*w zHpGs1+Gy@?LuysfZel&NMbc<+I2ZOR;Ux*Blu3z_tC0z$PkM^c>&Kko4`O${drPUC zzj~CJ??hCwxs-d(4GdThS$9QUp0 zXym3S$HQeDiL{ueth~RHm1A)axYGQ|8q!wl7-8pFOxz;whYl*cLW5N(k2QvRg&sU8 zgu&k68_!d5L#?BqAQ;p*;8p_Y;>?-;&|?H`tDJsec`ov!FUz%AsaQ0g>``R zqhN{V%MjXP0qqrhL_suxUE*hBXHn#wz3e@etv&v>NzZMP?zWx-@{hND021^MF1Ub$ zahZkrnz;$;NR7`d%=(9q2@u-y+@AjPr*w}*HUMve{iX@%o3lxhH%IEST#M0O56alU ztaW)5+IsG9#QfZLRlRUIr%E22Rid5rb>k4$2s9-rT}C)loZ!zx;*iMmeZE~)OA7QN z;yg?D>uZ}No#}1&uX1VZ|NYfD2vef((#g5M4gD2YmeRikqgRYyr`LRyI_%%w+qd?)RkA_nL^7pfJON=S)8jC^yHY-( z4UCbQOF$<8sG_DQKph1dqJ;^`(>12n{?wBWiNyJ!)d4)#G=p`VaA|MinGAp{osDwCCwgXQJuDV)LBxT zzne2f@fOIEWRGbaj-IM)E%-F%wUAgmuk2MYJ2pZN(RtNf`+?oTjCz-rT#X2?!Iy8f5yVTBI7z6D&IW%);3YsxhF zdu+iHQRZA`KWS>tphL0ks-L(EzAx}S?i_Hsj*O~a+|^r7?W9yQsOaOL<=c_=B>5l+ z#^4C@*V>UQgnHXH6-1?f8D)>)wC6x@MfhPHA z{4VUi(mK%;`ezJWMetK%#DMjQ$*QeS`$M(S?$dA`iff7)D=WZ*3^c`K7p0;?UA;AL z;lTbk0n{p!=D8v-2EeAMJs` z0ZzMj`MI7_$?}BjLmKo}GPevn5znhBppd@lb5`R+TPIMK* znYCmlop4t!5PbBjhAqzfL;*UZjQux_$c|a=5Qs*lUD_~bL%Ni?X^`PlEg_HT=n%sL zMGUz5TXUU2D&1?!ro0vbn?pu`j5mya}W!S97j0r7Gx(E&d!|43s7Nfsld% zCpuN;<)?dxn7yv)HB4+s>0U~ zmNtZ{W3iI9kwy3Ak$YV%Hn52k$hlDqi0aSP-3t~}R991rOl$kWQf*WR+trhU=FjaC zLB2(;ub*EbYGK8 zBESd*KH8~~9ckaQU2uI1ng-^wi4^?)FY%k^0Dcn~ zai1{uPJ#m(Yl2EP<$Phfkwn%-!X3vthL>fhb}vPPfsn`qZploOIQBk!MTG2IFej*E z;$em~wD33{#52^D#0dn6Ed^%NeHr;qZdLNn(t6iK6tCYW26sTli|9qV8?i&rPx$-Y zgCkJ{Evs_h(0_lJi1Z{8Vv)My-Hm;^$+TS|r+9;Wk2h>6R600L75JveZv9d1ih;p} z3;gJ!0WB)QBc@~)IJ*-;IQ7aAWI6u~Quhyp(f<^>pHJ+I^`0h_nI?i@;}K%OBeNEJ zrlu(h*eLXY688nbfHm@Ex(aXApW?X+VO@b-LJV_CXmWpe;(bfxlfe8 z2ZsKgpzmyR*IT-V*M132%A)`tH?NU_sU!dyk@yZEKK%#LNo!yfVa!HTfa<$ix}SCp6%oo?%i-@S*U7s;~XF*dEy`p4HZju7TO-|_0& z$gc8pkiSZAy2-KvvZ>rNSZ-`t;lm+EdHNcE0Uuf*!&YgYj8}*CsEM~D7u`p{Ht?3S z=dJa~JZVIF<)Umm`-=~3V(<(~`DKzFR|>v1g{f#v-eD1XCQg+8Qvhh7y)B2v8I zVpVtnT8$F&vR@Y=?;?1Z{x9s2YPGfIeJ?o{{0kzR2n4t<{?dJz6VYei7h?|*zDtw} zp>`9tio{#09`AejZdtLSFX8q_T?UfaW=}7p_kLU8?9<&q#rZ)ddqw@jZWlM z4csFIE}vEBXV1=1&|R9?*gn7Z+OCi*Cq>(tIdZWP`{JHzVo~Go=Fkz{3g@T0=`vDa zCeN8f@}T&bVoBw-I2fk+0AwBWAGqb!S4Rx4 zfZ|QK{Ztfsrj~~O4&6icuUpTiDa9NfJeY<%7>Y^s%&Im_r1A?4TVA)Wl$Pf8vctmm zfmj4l6K9Yn2P4FAdeIl)O#SbaO8~;11;ENVhtNRvKI6c5l~v&x(ZcaBCFG)-hsTfd zVHrgRQW9#U?n{L|b-Z8}tOhs6)R$APIt<~=W8<Wp}0qyjeQ_AP$Sq6_r@IXiNR$bBpupM9B$ z7!YYZC6=FA|AwPs<&U~M?h`!Zwe*A-ST{lRp-@)t0`>6Ie(CDEce|aqi)%pU^gSrq z(8~Z|(R#PzA5>-wR-p}~|4lfylyNuG@v2b5P2jQNXIC4$a 
zsS{_AR!Gxmy=rgz7cd!vu$wM3vDvG9V_Ah&;$+Qlh&1v&+&v{6(yo+HQFyPPHaKS4+BU>svyBHymNr3GvS}-OW>Q`Tu~GZ^Em^_TwCM zposahI?)3I)7MBtbDs4$-JvfhdE8Sbc`vv_O=KnywVuV7tV^9&Y$1>gtFI{L&N2() z^JGbOi)X|9efAXL<&O%-T>=Xn&Dl6eAr-9SR|@B&RNZ+~tFC#u0SIxNrD~l9 zuh0v8?y>|aU`m8|LDMT*ZlaPz9gbM2DC0;jAfhlqCHuQ z(*ZC)keg_`0GKDow0@;_BMK6h*GAKDp&&^!OCM=Xy_h7mXtD*0)fAQ0PoH1oc>dwk zJwgs7(&O2Ne$;w|45VCb6*K=5hnw%%tU0~fdL~-}V)`W;9>9UPMm^&BgApzAI=<3G z?4kSmkpxcO6aS(fLDp6Jh8TFa{6tsXX}kK0Mh&3FyehdkS4VG6bAvj}>p6iuz4Q;= zj~s-ooGF*ABXVFm;?Za;>B_M<$sIwBm(wND_I|?_T=$LeMaLZc@JD@AugU}u3f+FU zC~9sO3I42x-F}H0i?r+0qDoW%eCR;3m9S#Lf@8K~2kY+cO?dF>S@)7icoxJ8v!mw(#EnNj}aQ2CQT z%Pc^9R@Wqcx&0D8w&JYy6bTfK_|hw~b0v*A9)Y?-0c-B=7hQ_ZPyo|8T??uFv_(p3 zddfCAIz#dKA4wk`AnA(%kbqCg{cxt!I~*W0m{3|aFw8Jg;CgQn5rI(B7}4MY8afg=fFo3LTw7=0h88WsM7;p0RqFX%1aHC|7-E;5Nt9oGHV zznxFekbK!&mopttmhf6Ld_sj#LN~G~%;*SxpBE-&-s~z{KNTTsHRzI~+pndI9m>}7 zWA#da&Pj?P-sX_9y7&AlsS>F{0)Xhj zx%3=LeU`m$TK}*J^_pR+Rui@3BVk7ZJj&sMXW;*8V$w5+1@zS2v}3-z51<+n0Cs2Q zs($u+9fKe?A}fGFR5+qxqjMuohdY)YskQ1Q`LII6+NG^D|K(n;>`*GX%++;BLI`*@ zy^)*46xJF7m)OG0NFs)UCO#9RJ$u9dfmTaGP(|tv1x@)T0+r=@*0KdyIP78-qFI>5 z;LM;!84sjw)X`%^K<%WQVU>~60Rtc^;Dn?s!2({u_MfcrqCFf#i)1a`kLf5>SfXsr zQk_01uTPYAB~UsROxz*X>2Y>-{U!)0zlDLb#s>?WFu>E#pE|LAm9u@T4RXOHa-5&b zP_CL!6D=p3g$lt4WhPkZ5dOjSH9T448x0cA@80l?&DC%M zm(rvhaPV)Pl(yw$9&QI)+E*uV^j`1SFEVSiy9)Rn8H;^v6uJ^YXX*owjY|Cyy{8c^^EiV9= zphgw+mSGx^!sktTbJu^~ZH+FG$Kjt*pRylC%a!MaRm+bVs@>9^<2d=U=>*)F%uYp= zWpd^t$f{t0M9LBuJy0o1x)qQpElpO1O)%)9s=In^^KVJz7(ijnG{FTZ9 z#pK95%>z$!Xrc>&HW&tV%Yn&0&V2)yjVf(ts?=$`sEGBe%lrPEd+mFzMc&b<9C)|q zSE|@&Wjw`M%lRppP(Oj6VIl06iG0}IBqPmuxAyv?j^fMC%|vsj@3a@~6jTr)q(8RL zF2`!?V^%n@rMrbyU(`sZye8U&_Y!mZaee)=G|K+?$%iGJvla=$G?6Iy<4QIOR9PXB2m7<+GW)8NtKkvH0n3d1|L^1l|{}0 z;rhFHf~@4M$;S^iB*nU$mXkM|ucZ$;$RG+Hj-@jkq zTdyA%y>o0wsiMIvPrTDGCbejKE}iv0A|E%w30=*yGu`o?LUN}e+fD*D%UlE zU|Dwg%vtKhSInQtZakXB>tr#MV)9}2 zN2^rsXMEduBy;B|b#Q-Jr;+jMolJ0UBCRdmlKtS$=?75)>R09ftpeM}NUI#z_`OUw z+|}P}EaxH8*v<5={+3Q;kn%FeKTEUOCfY%1YQIQ8QTB zHm946K^qY)5SCEe9c)L`=)u2W6)F(_yuB4wWj*ULHtj)h=hQ+TWpKbC-}Kpw)9;8O zFF>gRZb*QgW!*;!br@s(XU=_Bntr*FRPhA`s3P{34bP=Vuhw69_MK)#%dmV%>dt@E zfh$BVPx%$zRoS@0WL%vyl0a^Qjo$371>dR(CKglaa3SS;#voi6IkcSMpu+L$J9X%7 z?NaOGv=;W;Pw==5Hp*$!n_Q>S9wt6sBDcYVS$q!ZUfe>`!9yvdBY%>uc6j<_r`N?d z4YtPYvs? zt24%W3Y03F)%wY&n`oT-#2%v2kEffX48vkqFP(}8Sp}EzGd^U!NnbRoiX4CIY8_$b z$$7%a$iQoCn@m#|0es~Bs4+>cQezG9EnpZwau12J2z0`Tnwu0Fy+~1bfhB|mYPBI3 z?f(3Mxom3JcDfUjvJ4)iqeFf|#ljn_sqvCCLZU)Yb4J>ZM~&na=^0P-v%ZI*klVwT z^N2sF-kfoZ(4!x$AybHfqhCEF4QsKO!EGrBo6+Dsqf#1UEcEKFnM12NPwYz6k06o02mN2g2M)0;&4nuQ`klJt%5l%T_PgH?G}e{2Ou?joxppH(tx( zPXWR2JyD2-RsKno)T<8^)H@UdShR_$`gAo-Lql5MILt?!faxc?6MNu(9~ZE!pawl< zguqV((dV>LPvw~K2><%jbdCuxF>WmdAN}@+yhpP7{Y+UaEMc)ag-~0#WJG`3T%Bw1 zndoQ1l{x4(bzl)b(jDjcays%sME#G0KMo#s$^1gYm~baZ-N6^^TG~;$iz&9JivDxdb-SrI`O{`Xqk#WIlOswwW2DdS>6=Nl@8uFpqSyf(XyqYTri<|HGg-^hF| zx+|60?HRA*@U0pb>3T@^)`LIitVV!*(Nj(KOtF5mF!u`#IZeANMX^EzsRi8vSz}d= zEk5U-3zRgOU>s(AVygq!dFIKbrWJ4z@CUxnB}{)B?sqn?yx6}yjCdPc_N<@jEVlZ? 
zF~z>`%N_P?*!Vgs7GA>Q7E0Vetz8<{F=@oNwdB>xqJR%Z)VTKNzt9S_4Svkh8@L^+ z@TZ!)#&Yraqa^a(dJNm%3iE>A%E}1jybgN-=Z!4Wx8{Fj^?Vh2zzCrc5z2qiar)bx z4VTFpqzk(;ZpVH=FOheYoEIWlPp*y#I;k)^XsfGP-zF^ps2?Bx^yp zp+#?xp>Gp#t?VtdJ4HCkw`W~^L9_-K`9WYlc2`#=!35@@=T0)MTFgedxD&|PwmT2> z1?aiNV+WPbJVNqAP0h#7FGmMNz!i_f1*)I+W1NqjwLOBk@xjiH_ToHR@=Qj*IgC|; z!!qm(8P$cP1G+uQM8=Eonpw%2jK(%|{To}~aG`#z!dE2UT;AG#B$f0H&$KZC=yt)K z&~MTe{yx*&3LJ*Y=X9$l1Lk(;=Xg7h!;N#(ZXNHM*Db}QIEZgr!cyF~nf*=${O-$E zFp!Uiv2Ji8Dqn515_fhE4pe4Y8LDVl25PYkj@?bo=c?}DB!B6^zWfw@m7v!#pZ(y4 z&hg_nZ}6d<|KrG=w9dfwE1RoN>T$Eba(WN*T1`DdLW%u-!W!h#q|}wFLxbyC3KGo> z3-|QpEIMtLf1eDH9C45--B1vICX=&{)&$O`){%p_jDyM?4VJqP;myy7o+I+5&97lDak&ql3O?*; zNpz)a4M8RO6u&RQHnJj2-Yn#LsZ_oIA=0RVz%#G>EKg{?1t$W>TixA~$@!QIF!5fx z8L~OBv2%wXypbfdpYui9 z9p{pA*y)^Bv%yq_)9uNcBoSkFz78A1VS zO#X8Q0jerJC%O+%^{Hp)BtM%rLiS$;D-2bVt`*-ri@asn&{h|Y>WXN5thiC{tp%VdBw;@W4!6fa<-KhM3T{F1RMkv@Gq+! zdTs3&BxH}h*-`M%7JNKv?V=VO?njnj*USo7X+5h zdLAa2klP>s@FPi^%o;x@m-S?Il{1hq#UPOZVNesX8yNjD$FqL&BOIbkzm};{*6ebI zbOtTdzD2ol3l|}gKfA85b*avNlIjAZVp90P?b(~*7y+-~6Y3TB_fakSeem|TeFCeI zYUx3S6(3)pd;81Ct@8UlK$lS?^zVGS?ZSi2jXNxE0m7{{nz+6b_X^6{rPm2dub(h` zv=_K2uFXOcERgkRY_dx@&CW>f6~Mc}dz+O=v97?8SIewnm=&`9V`KW5Mf2f|OKr7K(B?=cZdDhyf@F1uY2-CL<|9yo&an`qxpQBq z2Ia?gzWh#U_AOX3LaQAK@m$~Zc2g;W{xOG3Adn}o5Q)G-QbG2uM9nWGY6M4!7Z z(dX4;lYBCH1Fj{b=z0Y#>gM9}5ySh^b%in=Bp_SY}59^|9n|Ha z`joLeF~O5)1;_xu&(05$zgLkD*u1BkqF{Xv``w}QYFAq8mG6dQt?Ty&YJ)DCJI5t=H|tBn4gIL0Q!UPfAzNp5{d@Lu<#3f0Xr2pyyZt8*%|xE?a2W{kR1qa zla&0i5*pAJt;Qr&uv+rC z!Hx%$_g(H|gHQ3?V*NL2d1DsnUd=A=aF_zMYS4yy5YPGu9}5a|6M3f9313khPZjCC zgau`e`98Zz%r|%c#Ce3P0lfCxt+r|E?k{)WzmCYWmh*cYx$Yx;V}Y2)wza|8^uY|F z2M5Xw-=Ucp+aq@i{^j?^vPxK>m143;b>G#m%*LG7MCep|(xvIozu0(Yq+{E9D1FeJ z9_YkfpG~ZL*TIU%;WTSIFXZm)<|*_FB2pl!)U6WzBW~O(;KMw`m~Tw_NIQu<3KF)? zY;+5Q?Uo6}99^wk&TYf4&?ZUb=5!ARt}0>8(~LOG<@N}?$fYkfVMGj4`KEcp%&dJS z5q7#Yy>DuqM$2Q-?boQf_oOcF8>7* z|Ja(8p9XRBaHmu9DZgbS-H1VQ7qh~4Ip5w9ELy>(Tl1ssCo&buW0oV3JnsHKcfd0Q zO7yrZU4`#FHu~22{px0XY%4|dGd}Q>74tsZ=;a?j*zFq|jSh1J0-j?y62g}kq1lek zg(6_Y3dEbK+;nHy@0I;Wmy+7o_q@1~mdL zFK#OwoDk-tX0j^(*lPdKkcTB_ZOm@po9XRvdYVHJ^;YLoym_f8P&*3RIh63+D>`|Q z|E!ey+_yP-excg;dexQ$rF!9vBK`JyEeEPpI3;}kRZPF#?#+M1u|`k6B}?>;IgLysGpy`E=T#r$!{pjp zFX=OQ%S`n&I$d$;ip#Bizu%pH&J$17@DHj_c+<(xWCuFG z9oc!V_E4$L_V5q zv>{skJ|r4HXwR;4>YHU;)MGyRD#y}tW%Vnqc4yVwwbrd1)2LIgD)M8PH&dkw`fzd? 
ziROa@++CQNnT|cN$0(sH`*BtS%0FQdICR|r=i6M#Li$^vhP6}|pVQL6*y~rm?HvUP!;6A<-Z5alJ}K&wEQmzyuz4 z|BJQvjB6tJ-oB+{qll<<1O+4l(mO$ls3;%^0YXQ5m5zi&1f?m6^sXY(t3aqh1gTP_ z_fQggO(+RO@+|uM-~D-Y-*5aFvYVOgZf4H8&i9(pU<0<}^>2=Zit(tp_CD=kC|a`f zQ{X|>A3vUc5!c`A{^vHq^PYx5v=YCXLfGmnDeP~P3w^WjMJ6L1J7kRC6>U1HU(Vax zkZ-d$sWN+lCj*NaZpNHu*1*W*!~3}#@6l6Ahtk{=H#&LatNj@MEE_y=F+eH zBU*Zm;RbtcqoB@!H$V)+&$!Ii4z`ppQJfDE4)L&4QpYfc$D$3^!L`n%QI=rEU8R)a5J9T=5svm9furkio>|sAr1{Rj8TMPUb@O=TlFS9+ z#@#&cwpo$ry&CP>b7~&HHCCITJS^#;eL%&jS_$4Y+Wmb3pPl-Mxi}_yS?;8h@fsgO z3IAO2JgUsE1qSr(USG5As`SEWDcYF9+D9!EZ`}JdT!~ZaHY3+kj6?Adh>_FYt>tb> z&78Qy#e6Sp6*!~@TA15fXnrz@9E~dSK%L9n@f^U-hwHL#7G;l;@TUE_BH_=+gQoLxdAVJve=BQ_eW}x_wwcbEa!EdIRD)3?Qmjn{qsex+?>VL;PoQ< z=yS?($J9PSaBS52jLivEbE}GJpW~H7aA33XHzJJ^b;J5=(cS3taGl_h5yPX*a9i{G zdA1p_8FKphH->=X;6<8>eD9DHc%P5?$(^i%IP#(8h@quFz1sI&jPh2NHKret2HXyz z78BGe-mM2d=qO14BrG#F=6>%9sbI0yLNFp$O8?Y^|T1O+vmXAA`;v!JYO z4*!^3U~Vgt$3ER4jfdeagZ*uX)$cTZo33w2+%i*KQy*B^)_~l_;b*vN+o$m0yTulN z8jov(`kUbwVJ8X-iUvqluD3w*bK!<`vtdb-Xs7RGM!n*;YvG^89yz_fMRJaUUAk8@ z2!EgFn?cdMu^X$ULd#c3dfyojzVIy{LZQ&VevZMEjRkF69}X zzlD1&TI*a zy@zVdgZ$T-^Ylc|`_iP$QEs{H$Doo$W}M;a2hQ^n(vK}zuZk)#JLKR?0;~iY6&S5D z7w;Rz*luYYATbo_sk`nOlY^<|6~oK( zguYGhH?w)7$fT)E ze!%VqGiA2UX1*RBpLiYo#ObyC4mvm)#o*tis^Q!rzw`HU9r&v1A%xg3G}9PqNJ+co zm9B3X9v_G1jLRiXjLHj_O@4#ts|;_pD*u@|x!CINkgTclg&(m4V~sGbeo+nUj?YFs zRkO~Mj1>4}`k59_q@t;@|L}uHB{H}E#vh&{(fLV#~Hloe%<6FY5+*>#Ka2?`2@GEA80hW zlz_)K|FmjL*GAk@89$&deKme~z_QTm(BEpv1Qo{ez;{ZARB~Mgc!_qgVBApQ7r*!V ze0+$6-T7_FZz6?-s>n9D1m?Iq901B4rE` zGJOsX?TXs*q3MINy~iG#m&g3&@s{$vt1s8OY-XF>?w)M()T?LJea|FUuRIGkIVt=D z%tCm=<%pEqeEajF&4>o^E9L5Q@A<1}X%Wi8o-CCJZ25hZsA+_3kVTA|Q-zdhGueS(> ztt~LjcVXjt~!P0WdTwipKyp#j@lObP>8&((uJ5Wj#e&*<{xt4TPLOTpL3#O}rQ zl2l%%CzRPY&OgN1@=`Js8HLb8K-PUivO `TX)z2uPmVzSdFcBdi6-^eFISTiKYi)AhybeR*HG z>|}$nwqgC*hOSu7i@SKbL8xQ_C8Vay$7)9T$z2=i9wy?aXX8>Z6=gl?z0gK#RBk7u zLt|8_lg%y%2Zu%zYlKkJb~+zgf_5#wU99;N3in8-h>=wS+kt!=a`dHBwk*Us=WX+P zf93-ACzzRo`;S-!9y`^h9ShfOET21G$V4ifpAY|FZGeHcs|EuVg)!CYv+Y0%?TfQ1 zAQ3I9If+bfQ>s^}Gg3?YXm%VbTJN?d7=>Ss&+LC=84KO@)RLC_I&Wno88lnBcnx42N~n78wn+QMmsh=`4T8x9 ziQ*k~3uZmI>xy%vn=-z~aD6By>K7ZN;-ig^csGUfuD{{O*_>WAE6L+1@?>7K*3tqmBTAn5b9>jj$T~ z&sr)OyTbJXsoEr~TNi;KSOUa%2}-OKN0Ba#+2G=~Z%h|bTnsORzcabfWWK}b>hv^ zpUc}ph=u^~{Slj1{*H)-GIA23PZsrMy|z2l@_@jPmc9YP^}5`LN4nffDq|#hLo{}t z#=y`Y#F`fqHESOa1s5$l(^aB!SSB!*`pYy=H)?a^{3MF-lpC#)^L)!yN;?4{u*XYL z&ktgO71~ONljpp~ul@@65|V^UX~MGr?!w^-lNz_qCOEb9CVCky_FM$J3u$3vcGjd8 zyA`p$)U!}jlDcO-LW6G_zBbPR)^C7*!TQgezUe#FrAkUl0`i2D#NG^z3&fa7&3^#-Xo|K$k zyJ9ec*ZW@B6@=Dpu7DiB!;o_ntNM?^*6yL|96BJ)s(ai)H26dxCk&p`08`X3kp^s2 zpUB|DLo@@9)Kt;^d!(+?iDLTI#eOBi1cVm7C5ZD?7zoCA(+E>bF%w~z^w?mfp9z^v zZ{LU0wz~Q*YJ4@pO$voXxN(lW_}b)U1>GDzP^rAwz=t61C|a7orTd~j zP=rsU>GQs3^rRp0p?j${DxzND-2JfirJk|e6#5a7H{>nF%kS%^rqT+osoYfa)sk(_ zX`vca80y&6_V^%p(qHT7*h>zXD`b}V#e^DLdhXo$Wej0( z7-=qGA;x9XSY|Tc4WZ8&zbC~)g!94UG2)D0Y_(PhuKRgt- zhugVCFnZS+3ro^*26UPH{s{W9&g6d9QfA`8j1fi)^+*$C_`Yu$e3)>FzHesKlX(5L ziv}tf$x4i4Tnl4mmUh+DXn_eKoiBHWmw1Vq2E3~E-`|wry$fA04&ta;j_Y%n&eC!l zp^91FxgE6l(a6_KOS(5J{JaBZ(V4!p^=?u*<_cYI3k}#f{EE3=&d;HG-Cux{v+1u zEeph&%6VjlSD~Y`yS0urRzs%mnb#5Se7Ebem@AS>$3COWqVBLq{Y^`qD}(UY5?{wr zrCcWvPgfMI{T*o@_gu3rq2Pt4#w0#_k2rsBtaAxZCsxb%qXct9e`Hif_gCiF8>gQm zu$!{mjyGWMx!o~P|B{e_kU*p7yo-Jxid%MuE)zdJO1#JFwiSYnlKlC#S2;y2tt?@T z;AHPfD0ePxB!n6QF(YuEi0rlyPCZrlT?zmu*L=xgWeznMCB2k3j1$|A^O&igZp_*3 z+jh(guAZSHC*YPK>%t!Jv7Rc01wJ#byA)?LShp2YTT(B^IAN!gu!W^r@?z^Bf}4y5 zH>-LXT@Q1*p9R$g54I1ykOY}4a<>|n=en32RaO;jQv0gPR-)U&VoB3Z*nbbr_BP}AvfGxQ0jsJZDm zbQpNP3pL<$oX?u(ZipMWPwbs3et%~9n~zjz4s?^O3&`DNt}E~D=`Ocgw>Oqdj&}HE 
zKQ+$$UN@*DgyvUlmX|<7U!5Ng9jRB_RL&lsXdJ0y!Z58Kv+wtCgYIS-eGL&K`BNv~ zyr{-&dCy!*PfO$dAz`N%wveF5O1nlFV@K2^aELX4lov&C*FqjX<-U>l8tMR@w0^-`kgYr8Je5B4{<5ia2gPZgK z$qZ)~k?29qO#ap5eT8lJXm%%U;N#=Dh#x*gBobRmoZZo7Fps(swI%`J&c2A>zxp@* zljcoB|G#Wa+098=8$fG`C`P8sK6kpZ$N#}>@Ae{~8}i11dg0-xe>Z8{d#l@UxWnnj zchc6^m?2uH4kMouAv;SJv478xEY%ayfZv=($$^ZBp|puA)DWd@bJPk?JoK|y9wz%GI)o9ek+CvULoM?(>f=}b7#`uYP9~#n%H-?US196{9JhcUAq{C#G}<~>RMHbK8*Euv*l3B? z{2dUAxpKdMO3^v@lNY#~)-B>rWUN91!~V@>UG3V<{=`cQ2FuLwy>F$fSbzx1rdz+> z=sR;AGSs5GuA|ThY_~}pWfrV)A9}R1{EKwZUJ1MRxo%HAC^=K=R3M__e*BXN1yrkL z(?k(>`#jib^NO$MGcSWH$DuD8;d~8A2cm~Q1X_iSl!T4l)0QRjiJe*^IP}C1Lnsk^ z?)dbu&$xu#8pshfHidY8*!aQwdySy>H^NyAH2~hl_^Tv*S#BW$$ z|0&R48`em6g7hTbBZ)(x~S){tpRwvC=%ZpqTAQ%ODK$L<^EZk@RV8i^*oT@eEovmeL-J^F^ z&XLNWwoPM3G0h!``HWpfmzxEhezH8bem4Cq@{0>aUuvaWe1%yln~*=sqF%STmYFu( zKB3($Sz%wV3l=j)qfDh*NBP+UV3mxza-vUR%JVdqs3!CmOR3@o8KZpH9pd5Ro|Gkg zxCS|12v*{2XXMs|dR1Q%75b}rv!oY=YOfJfhfP&Ty*u;pzVoZo;l?>B>FBf$U_M<2 zsecA|`bi=^N**OgtM5$p`u@InA34mLbM2|M)g3Qh=vTom&ax-V z0Keq22OTq{M+>S@6Yu@j)=Dmv*l0srbpphgKXaQ1$oYl;`Q>^4jJ4G-tS42_3G@DL z^y5FVdM2Fht%A-P(i=Bcx#9T3tFJYa1p=!*m;Fu%YRjf2=gWSQHxy^PUJeIX&_*1G z{3{^s8|EA}ia!!7SuH(?1^PvGHX$SEfdq_T+by6*j#?*<@nvK4Y{9@zqsK+qTl8tP zK%IP^XWm^xQCraMLvgRO@{5F3`xsm2X6&!KQ>FYlCSH>F46l@J`u5sq-1?&30F~#r z{$pS6=Zf0+9G3u(_D+E9Bcx7)G)N3n7q*}H^J#FhJN2XF;iCyV&{Tvfl)9xJ3BBQeD(_PR{|QA>*N zv>>JA@M_vuzn2i^i(xo_y=mP4Cb1yW!}qn_iJnwN(!{d-<=6)sV>?$1|TGw zGSo27uYv-~%Oub~N=8X3FIRiu=NyBH^M~!?Is`TD+tpjU-zfGpD0+e`PKeHbX@-ta`-=|V^507c^K&&4Vn{O*(;tB~y!nstL&`7S=>tcquahEQRN){Vak z3X~~4iaHhm_f%8msFBf2(`kL5pcNHtT z%V)x?=!T2I8f8fM8xrXl?JYwn3#~6gPUSeQmWlQ5kl(;aAKXRw6#Ow#93jt(?%&pb zf8F>;_(C%eaoM8cG4w{ok`f;eG{7R6jWxeiu;$|?<7_U;y zY4otT=>9Kjc>yyaLwjTQyS zGQBwy`A5ljyo*LnvDn`HcG2UBC|0mDpZvYa!R3_8>Z~RC@~ao?7B=3|DCWeJ$%*E< zYqz{FQopfzEEi1@65P&`FBLJM*(T%q5kISmM?Vl5np^y#!vNgv9JFJ0x%;F$C$X5C zFpRt{U zpm{ANT_9rfJ^MtvIU6bwMP7nyQvm*Y>sY#C*dl%5F3hyetEYzaD}43L$HK{W$5Gx1 zGqf6bzsfN14XFFo@p&(?jwB*ub72YJA=czoY`Oo{Qu-3W;wZh7xv@p;4-0JpvAHFl zBLWurN<>H722g@vvUCLgqBE@p%~|Af#U~OhSS(jv<+uCPRX>D}Xws`xJQb0k_BIhx zNQ#f(48{Y>Z9}0OJH&BWA@vM;Y4^Vj#@rl6KOYJC%~)+IO54=_10dhAIsExOswXt! z=w5*6VPosDJfAlB6z$3f$1NyYz9WJc#vojI3GXxn=oCnpy!fQ5;AD;n!iEAfhvfm+ zDLN}5)JJS>CCPHPhXr^+MjK{b(WUmmIV&)U1B4vx?wxMyTf~?ZX*0@L)l%>MX;f*S ziY62F*WR4}I}l)~{XM(wjUAuOBhFt4SLCm?eIgQ7zo7}o6|Q05Zu3-`4&C^7kBGG) zUe-aKx>x4YR{|Dngs$Kd|rY0dR3Z6+43yRr0ldvPmokQKMNM+CY`~jb{ zX?Jt|BK5$I@+jS3A04Q?5m=_*izP|q^WwK}rO|b$`yke89<4hzZZ02U9ajs_Mj@(Z z`^-!Fh0QBne^NE1L-Ia7YD#NZYzwAeRNR!&k)WBTY!d%hm)CP}#<-iOj129(`{#%q zG<8zi`8d6cT(ycI-n8F+Yxu7Pf|WfXS2a-oBJVOfiWyXvMw#+;@D79Z+~as==Oijr#pDtwB`7|pe= zaeTeS{GCh$6;eEwI8^Igls$Q5;YQrI6!0#<0brtZ_HS(i2Hf)M=H2~SgzpO%Rs+;I zDv}cEd5b}6u)ni=pH610XJfP85D!u`wgYR9l@dIK=TkFygqV#igYmgIK#%kg`~8CH zx{wT4igVUZ;Y$|VZBAR)zk?bWcZ2UM_&lr)?038~20ksW6iPfHt467}@Tmlr&@1=K z5Vu?eNV}~?FV6_>`6j_WfrfO>bVC+XE65aCjE(jKQUa{Q-m@Ej70nY6^06ejQ9Dk0_&=3IN3CnAs(+A=! 
z*WoRd761rb!BP`S8k_vrA0Q#V4zyU-j3b^g$8tz~XE;CgV#J>olX$sFKv4ntq#M~3 zOpiC&n}q`r<#OCE`Dofu=nk|?w~D7!syBjke<4CQ_tJ1Pkn^hLVzesFi!pysd+QK0 zW@f4|@V^!S>};UmJIlHk)ve@vC3nPodvgBE4u2Poj|e*LjdGrK@~{`W1ugzxBLV)E zI(vhW^90ZIWrZ5)+g-s%KuZEYw$+1$M$5Ek>st%6Q!nB|H(%#v&RJgWRrBARe3c8_ zUbJt*CK@uJA3yQzLfj??j5RX=s44clr{gsoY%`F4|1_!f4%knq zo(Otb@%bCQ)@qUim#H1VxZ7AHp=rs#7o4PTlPHpLU>$9ioWxEEPRVn-62mQjxr7SD zleSE8Db?@3E{*iBP-R{gSD<@=&A;vi(G__W0&-&p&hFCj8EuNmR}ei=Cc!viu@~zo z)H93zD(?e;HFC}{I6#BRA2dg_k}lb*s;|2JO%fAc?AiiRnyn{ zhG1%V8;bAYNZ~V=xd_dIMin~sp!3@-XSs2w9s~15tNCZlyrT2q&{26D3Je-YdrT8} z`o?fp>t=s^-vgC7d7ov`NmJy6`sZ%>Vre8Z$Ln+Smv&@L9Vwu}ry)XI>&7l6sMj3gornY*z$1^%DL*FgX6Wx(!4*pU9$*n$-xD>%)W zGM4{eKZGWJZQB5MAKoB6mE@$GIGpJ#AulpzMEC!NB~Q8!kO-LvA0FOS@Z7Vc!w`In zWpRYx|Jmg|mmtLn{G8Jvlrx(^HyFF#$7Ob{ph42kS4qEb;QSdFB;)KC31Uuq)^Rr-Lh_5Ty8{n;0J(d6Bjc+%qxBR zGE!007&rK=P*F8Ev?KUD@6Js5N88ewkw106l1g;q4{X)W;H&EfTrfOpNGDG|Rc!X| zE)+L0>74$!*KhkeH&uRn*yeVn5>&CR$MxrO=N$wb?7?wf>@OIPJ6pyL zQz64Np+LFUH4-`%$yN&+Vr))3UMxB&fWin(V000>_7pDgeLvZ||86P9A4yYOgU79` z^c2I-j8>Ap9hhv|cLG%zr*}M=A0yH4E-XE{1;L!gAM=w43QaB=At6+Iq(dX-_hTqi zZ5Hico}y&ZDSS842dPuAa{!+zCc5H^_E8S0mWBgxps5*o_$354Ll!I~95(H&!Epnh zTbF1L$a8khC~jo;7;e+UdvF#xi@-nZZAw9&o#sGyaRW;*l0(Fl!-^CLXEBC?e_OdJ zd;+HbY_fdjPg=)e>9M}a3GDpRQA--Xr&ZN%4M(?E70x?7zCIa0bQw2U`)9V>_07t< z=(&G8HhzVST=Z9Ce7LniDDFExU4LoP`ePti#a!wXTp=Fz|pKkjQ*jYaD z>AsF6i+aOpOs~?=KQP-l_nt>0Mv>tJZ|GW02wyk8aHbm%aW|U#_E>6EDPmDr?&;tw zzVqd&n9IkBWd^METeo{&c>cATq4_i;0l&%kb4>8K;KKTk=eI~d8@loMk&HldIdRW! z6{EO=4z6@>lIm&c4ZMy=JO+X{v?Eh~d*n;GT_$BuP`xyXBy6hGt4{(XEN^45kn{Q3 zCykSvE~zhzahT$c$aCade8tDU)T3??DNWlf=bZ0hk&M;l&H{SbLx|a~e2n*%@3>wW zCT8Lya=>i|*U}MyjPPB;;n3w=E4-cuaPqmUCA51bur8eUFazjamWLjGs;xj6tRonb zKB8}lV4PaZ6{V~jN5(HK1b5hnOq^yEpFODs!%5I6{mw^XiR2})tA~}3r$52E3MS}$ z?|2}-APtJ`^l}~?c|DwCqidNrTiK58OE~VYbbG%nV^WZzSC5U9Mao{b-xy+7s~X<5 zgj~xP zW8b@oNijqWs8OqPA!# zo<>{t(lyD4QoACZW?vs>k7Uoh8p4mFgr$;Pf4{K()Le*UhKGi7Ho2o~xbq6Tp1qQv zeBa(esnKTeN>E~_ps?P_zjRKyEn=f8Pj-ubYH2*-SUj!J@`HrnV$HG~b&72o?m!Ge zlUIEC6m0S0w7$I%QICh5%{O%~7QsK_WRs_Rb2MezFDbFrMvUqA^;HI(uk|YlJpQu7 zH%9g&jw|WT=Hft!g(;-}$Z}bwDI-TiO9k`DW+q%$T-0j3du!2yoS5O^J{3AWowt{= z)_$IZh0{^ZjHd1t7VlE8RH_JsX^|^^i4JgLplY+w;^I*l*=5ndmelys+I}=xTgKg1 za>#Ayd2eGIPW4V0#P<8I!#X7{7|K}GzOjK_y~y+%roncEP=iFruOLYaV`hZ^N%=Mc z{M;WAi3&8w;>~vU&eq&B{>@(!?r(C!R&3&gYQe|RFph(YefLwe+^u#ngQC*ZOP`hsj&2C)~f+*8D4sGCgz8onSXt# zV)j&S!kyUk9*i@xPRD2=SSd??IoTC@+3A%{Jv*i$zomg%@|u-MnGG;eK6ydDya6Jd zD@L$fs&dy2XLzdt?>|Ha4)tTFpOM##2~%iG@*1MEw;3Psf;@%zo$6u!ItDq0>F(O~ z#&=Y`9U~LK%sz`9WX*snc>#CAY%x}}AM((?C-m5|BUkTjf27=NyZB|_!YiUPw|zpi zQl6O!&fpV{zn7aIyf7?cSB;Zjp9R%Y{TdA>$r6SjLH07%i+&_PerkmJG3wu_D{ien zI0wDqyw|+|PaMAG2OCf=_6SDX*w~vh1EVrZ!caHZ#w>0m|GN`g~6;mA9u~ML@bj(Rw2y ziP;j+Ow?Mh(+f71mugl8#s&jkG@7Pdb z?8rzG+Ve zQ7+d%@lkbrg%u?S^}j=R3;Rgwf70<$OOEuOe?lG&hRHyog3hoK6c#=?i}XbEuZE5> z2iGFS_I;iYoEN(TEU!rXD6>#d z0whf!X=$??Pf*BlyvBz=5&VQA2U+k5wv);ZV=ixu8pNf;^Q+%m9~->NY?)>rY%0GM z=zCAwWTfHh;h%Ctx(6Kb>oJ*d;Y-y?ZLz5yzJHlRpI21do!MBtYn>BBm<%G)FU<6^ zV#W%$V}(yz1?oOon)9$CY=I9J$tbjfQM0Z$HXsI3JFC*`GCXS5dg`kq{l7z|1ibuw zVz_ym4a8LRm*Y#t(bQFZFW;PXLz~8$pvy+8b=uTPYYi&GK0H*NNSFY1vWJEz%d@y3 zVO0j0YBeua&+$#%G-%V8UGrjw%jbE#MmBk149P|s0y%O!V%iWKr0#{G?l0sJD;$v= z7J-{XnVQ5RcQM_6v_3x!tV4lca44^mB|Apo-(ff<<o#%%vfNl6$ec-Io*Pk#r{EB9pV-MAFUg@^WO=(fh%I_%@oHnfc86wK@LZ zGCV_{8bDZ7Jio|X?7qrz5Nk4{1f`a4;JiviXFOQEj<4K*_vVanJ9fwIHPoouiy7@? 
zSdk>NM7J9}9R&P;5jt#!!O0=J;%5b&0xk^yxw%_*dnSGbZ(lu%_q>~lovO{o+G-!7 zQu*lPuU)GtxYEojrXupU;-dGHkRNBGQNX}%?v#t^lfY3BJ%5n5;%f9ciUW4~=c*V(jFp;zp+ zzc-I#@!;)UW@JKSRo<_)X=AM=^1ZgqYHkcMr@^R79o2NzeAKMXZuBW$4uixl6?FNx zB6W}&=i|Bv@}P3j0R(m6NeTu<+_XRZKGYSy=}8gg*>gV)T^*UEA3 znS@r!-4|0r_!^0g+qx6$xa#mo7wXY}Iz1zgtyqg=VNFuzWV-_x){GsA{;I#g+*U0BqB%flEn z;i$wbEPdJ^q+SPA`Z4gK#DeK!&H@0x2!){hm4M^dOxW5Z$dD~N^u)fj`ap9CI=oUj)YH<{wth9BLYteUK(UigY#=rreG?$zN_Z(GjU;Uq%!pFi}0O$ zQnsA+<^J2DJ|`66EG3jTU(DU-h+BJft(+C>%ev7J)X9#s%5pNFF9!^~FQ8wm3Eppw z^NW*S3u#FuP3Gbx7G)BT8q%jbuCpN?dL*S|ixB!an&r;(&fgYFH%P}S3kmdElZ;G@ zAuqhTM)beic5-LRhY29pHTC?hR!7V}8l5~CEtOtPW-uPPNKbHHz8?MK^J5=|eT&GP z3y4!VO*BI8;cLoO`7kY-JcNn=u?t%5mvFc^%KShqjfdlAP&-z?6LD72=0kbnI0nao z5(r=>pZZj6-6YxGc=f+tZW8^9u#A>94SelFr&tHQ>%AL~?W6lL5ek=*?!Ki-7JED# zGuWF#EOQU>0sLGV6rk+Udbc2%KcG6ges2sj0;f9J~Kt0uo|4OP? z3sp=L`?5*rn*0?lnqN%u-U5tPn?Zbfmsz=}Rd_xAHEu2S(;y=wWB+GvD`> zh(dX(T`ICgs~zV+LqGqN+80&C`@bIR%>VhSKHYK`coFi7#EAa-NZW>Eiug z8Bt7dYDa9NNb$n954=7Y&!?AV2Dbx)D;yh{ByaaYY#-IQIpwm0O>`KcwYTwD{kqFo zEY=eLe-9-ITNN0O~IDT=rD*pCx%Z z2yt}m`vejzlt1c2C0SM#@Vez_B^wM4icC+IGY}~Im`ku z#+?cn;4mk?m?n22b!MST3SXw1NpJR`L#5Mq!R`%V(mMC^XRu0_3hT}TVQ>PVWM;8& zE`V=z)&bbr*tP=4eK>Vi>!Jl<07K@JfUz5`WICM!{GwDJY8Q$kSmD9UBZu%cp6q87}l~~{ATybEK(M@emTjiX=0LK`e@H9`xWB& z=w;O>zj(}fD2p5u3_L#q4Wal_EJLU51pAel0v^K4c?Q?76Vg3A|5$Gma=*3NatVXH z9t@h8e9v3i2q^9HdiWCr9iz^*{OYB{y|n%!GibJlhLxowLMOQGtU}`pgWynS9R74s z>ctm^Jug%3`!t>hecH`hjW08=_hw%Qyp9jJ&-(^-C`7G6`x*e_i8)K_m`S4yp!DrA>BLc)ZP!qgTzle z{~$JEo>28EWfh&S0ETEbau*NrA`R5FkM_a;6Y9Vfe`|Spm*DmCF6>6HC#=1oOV2gr zaVK`#^aWP3H~H)(fR^q~`hvtyr(%O8!6!<^=Vb&Z>(M_vM!t`Ri({I`h@<7O_aO3L z|78V!MB8!LKP$JJhIK4lTPtB}jig`QBJz5JWCZ+DMdHJ=nZ56HOdCTdo|VfTX5@R^ zF@r)`CW!5YN2kaJ1iT1wiuZApk#w90BmJ9!;|tHLXtf;RVg}NHL-F8-Re>RmSYu^1 zKh36fjXXntlaizM>+$}(nN#;7$}xup8m9BygyX=lWy=b;d`U<_&^3gAD_eGU_MOTp z119r~l}9AN4gayS32l4FQ@*`DRIK4A$zzwx{8{$uLPr@lXDD0rNk}P`iUgrTcoDr^ zQ_@$M83;$At;Q#>Dd(al@uN)yAZCaH4(_P;X0c|=HQ46l@6Rxl!zxqd=zPWYBHXIH zFD0W8auQd7-HZ0itk}km1&qd)*JR(^e2mNJ>#utq2X+r95hm7f9YK>w__SBpzi9F~ zG}nuruc5Js>~>o%&Zo88<#lNr=}8F?*PJsWq3}-SgMwn5$UL;$(=`rna4`{7pGO0AV3eECw zmsQ-|{t5Z%ty@-*vClXxvar>|%{8Yi_EJq)4m7kS;q}adF9MXLqEv7^!nmN@$!Ol& zq5OsU--kD_ zu>5>?V2EG;c|rNi-bP){e+kgKne$^B%g>nJ?c1fzq$jl>S>N=6S%|g5vFLqh7 zCT-lV<(fjaHyrBl^-PlM5~zYMx{-e8Zl-9zS@k1srnn$BM1KCj7Hoka?i~R3)E=2t zjbRotrqL-~pZ&y0qce))9Gb-oFNP9+Q*ED`e&dsYJH6r49e+mGYEZ(R9x|2T?R7It zMWz7&rlhXkqi&10kb=}9s8ekhO+=vI9^8)Ks3p5=HIPbe&N6O z0CG(oj7r`Y9e_9_dRPWbl{w*L0~w7YRFeO{>VAci8}c!~2DvLEV_ICR@#{(MKRs`t zlkp-|3d-#qbQ1aTmgkD5Wp2$MZzcFUk>7lKG+^m=$F>eHur$nk8#(EP!XoPT2 zgl?hmMc>`p<;@Do%)50#lOGrdr)tVDeYC_$lLsw%?+f_a|Ah$tH`LQn_+ghz1d=?* zm(fQ4s#OT7Y%Dw=!+;E-(rO)*+7gyByiv z(7+G@=VV(*{uA(yw2`2$OUAHSx@(CO?j5w5ZB^%Sp&6&5d;7HG24K(lJr}9+#e~l^ z;5;zN30Hw=#v!51ofn>t1wyOhBf!UCxZ{}1lVv!c(oSzZbX;Y)PP&($vQxvwigG66 z`=S@$KeZ1;aSK)(si+Ol;(u@c_h148I$s0hdt7EBOzKVn&ai2sO-3OI$bGwKOVX<$ z#ix^H`PBtMx@puaGJREf6&Ru@7SJa*@$pVyo|E)qeM$a}4sf3z166rt)GegucXlQt zx}H1PStjP+EHV8H*?)ih@>+I@gu65US^eWg-}?sOjquqFjqTN4>SSe8Gj zc5{qRHIf+_KD&kUS@WgTw+--CZB+rup4~v^zR2dz_FJ<+j(0FJFKm@@gJ24oUI8EW zUwq%b!IZKT!f1o=S&b;$KcOmM)|}d++6vv`!@weB!z+Q?FW;; z=F_n?h1g&vc;F>Br}142We-JY?lC3Rdj}Z&E1u=U%QtKPe``0od&u9VO)i)YAiDm^ zZHM>7T;5Et{}ulVfAsswOA8O-sr}_OE>;$PL`-OdfSp8M*cyreD0Wo;Fc59s>P%gc z3u^zGn2yc+baldRU>#Z%s6E?&SCN*9-`I~+V%uCxHvEo{EMU1FdRs;H=#}Uq?q{-> z^G?Cl);DiRXgc>n7x-}89$;UgEPwnvYs0{nCZg-wNN1m6qEgllHAIqrwnzk7*l6_W zH)~xF;S6kG&{GUxzsPcT3*n1QMJ0<=Sxtkd>=iD8Dr%#YPJwk)+53-&-tH)Prg72hU;rBj#!zElS6#gRTXo(SsrM*e!m#+R<1!#is=S}?a2NV- z(RhCFe8B-kIvyP*ZTL-5saOVJ*MZGgeD=Q;oQ?RIjP(Y$T$2vncU_r2D>|bVZ9tNz 
z(}N%Er7y)Sz-RuS-$%2NQ)Oojz}6pfT}9mc_h4Ey-nzrRG^}id{~kg5r*((G0f``) zou6e82>kC`x*NtXVt@Xwur>+0UtJTC|LCu#wr0W{@;X0_(y1~XsUa0^K{(RWRZ_Cd zYP5^W|NCwUU$)H)1|(i(X5JIM4+Bg}HCwx|=L>X)g92KKA;i^A>#?q<%YH>@EU zSG@1sgex!Q>V|J*%y%Rke=X-07OyNQUuPDSo=i^tuh9G_RA2}~hNw+<`B<;Le+?p3O^fFdALR76FjNiQa1K@kL{_bMPDO#}o&A|OR+ ziZp3aX;KuV7bO${X^B#$cOoD)lmtR@SJ>`-_Bp?L?!ABLM<V9P}UGHOb zd`tWrOZIP&=yQk1Z`bH?FaEF?<@-Jhw*X7^6F?3Oibx$CsR2f8!iH(2`#%N+&my^{ z>(ny(ydqj${rBN(BF5(8k{V?QW{oS>UlevJJ$L^svaz6j&DGSSNn?Q|`6cqsZi}9z zNlDJlHQhfxZ_t6k-6sV>_}VTSh!@S zvZ^}%HT;z>b(6z z(F%8dURhPW>UZ;w`P#Ym%>q~N%}Yra-5)a*`$(Qm>63)pOtU>{ZUlU2Gv~$jOI&}z zk~!ILW43jg=;e)Dos(@{O8nBs3ykB#(jLdP6|n~QtZh_Fh7g8wAo>d##0S`}hed`MbxquE`VN^D`X zBkbU(7@&N*O5`d!LIuIXi&U2b{YhA@eB2a`!bE}R)kyHPM57qsldD&blF1}OAX0?v z0uF|YnZ`@-!%D9hsE{AH#!WHQs(6!jil-Qkr6T7D6FgnS#Arc+J2`6#wLJQ0%E7wD z62q#5P!V7F@h0XGY2`KK7dRNCB{f=*%thMH@MOWTR+E2LMi-%3`OzE8>Co#+i191t zO62wBrWirO+;#?HoKLi0ke}OZ`Qh$_Ps7Oo0R*WRSHH|ka24qnT)#%Kc(_}?y0aC7 zq!6fovFz;Prw_-(gK-8uH$l!BEU8u>p#luwWq5Y$O8@cW)oy~~2^X25Y`>mCPIjI3 zVj^EIBHZDtw>Z!)d4#QF83F~uE=4Z((CKF`DtSoE`1yZ70jKr2}G>GRT&h; z%%$J*nN-6@Vy93gnANLH3Y0Xw>R4BWe0)k696WTrK3d!@5c-&9cE7fdKwQ~3X_Iu} zD=14yBu7C?CSilq=p|16m^VH0$Y}WEoOtK_JaKzF`%L&E&4b!zC0Krywb)FtG+~{7 zJTSqvxe-kcJ$5hfx}G9=VRYleCv#+DO+(tk0u!@KA$me>&G!uPhU$p2+iPf@&u-lM zz%#F+5%$sVxB9iSu)6CQqp$!k;|0R2g((J9m*HCO2F9lgcNR**e_IWPz{p=FTToCp zCB$~eh9Ebbuv*x7@7B~ZHfGG^{^5DTY9dEgVsf8agSSME_jVhJTM1>2-9FRs$ZL_X^Q+NiBN|&`ut5phvDs*uot3eVNhI&saXG#TGcjBCiPHNBRkjkmQr+GZ72 z?rS1&AAHo6j5#l(02WO!L}$!`YHcox(PMPWf4Bba`po|0g%bnhLYPQ$vPgWUq2J7b z;&Vc$7VVAUsY+6Vzcj(Z*)Za4ez}3K3|zO%HNl4)c5x*T?fFtWp)_TNOz4IpaLOuQB$9ihliaZ;F(vfAoZ- zd^^U0?iG(qDyn4f2M-Vnjvvm5!h0InJ;@w7uZ%i!*QV41fobM+u)ZDGYE@d+7L2#P zc{(}NudK&2x7?s=1uP2`lMKY^!mQg;I8SAZ-SP!}#I78|<>v}fR@wpa9E+ul9`h3P z@8oTsS36C?agdgl+*f%`d8={SHL;r>yRzG-z^~;xn<#&hCr(2~)(&o#HSgKem!u#6 zF72x}{nVZx#2~MP_FDhGeYBD(6mvkK0pDT=8NCjw1%h=XH|LrNtNPD|= zRgDMeXSWiS&diWXb*TqB`>#8Kl~Y1$p{EW;TiGtkSpO-tqIyt^QC~a)?QD?Ly%?TE zyH|>6oez;7(Q{%4991S}zm>px76(o^JaYdy=lN!i`7Cidgf94;dSTmL+K#B8uUTwK zV3BfZ`(OzOxKY?o3EfLf{)WLPR$Z6 zZF-LP5kq$zMH?7ib&Pm(f>1{N$@5B`{1>@qaoqV<9!&de>lEnYL~iiW)D4{Jy)(|8 zgMAt}afp+PEP__wb#d*QY|I-4E1y`)YGILd?Hh zubB@>Tsd(`yk9)&6f8o283ezlB)9e?5vdojXp9Ml+Zbo~0IIRKoJarAUj`pj-$0-& zRiYSEI1Nuw8Y9-APy$!SO;~lu4S9ngfrD2n)oAIwPlQM8pXB?wIOray#LQJMF=!u@4*M^Eb)0rr z3*|&?xHNT70jyK5P!1Z%M5&h;rO40sz8wW#3n7I?SY-Tho zVq>bby8U&$9GsHSM4C^IQ~RwMkLMhBXCpIFIQc%{?VhFU&2Hp^D&2BZo&`B1e=^pu zl02@2p8ujh<0lVi;F=oJl{S`Xq)oo)F3FdA@ixmqp-fS9#haS5R3+Ot%(*jlVPz-Q zg2P2tL>H|Q8+AFk^*qF(SGRSiyMI1;Ra9QB6M1+#1MCREKgKX&cIk%Nd+W=JutlS) zaz#s%w!i4n@8&@Dc z*o7vcmbEI^G|H{S!?``Pxj#&}pDt6t`YOO`(#`4y!)?>+4E7FJ;$7#c^s&1o;Dx~|_@i+eU90-Z@nY9Qq*K4OR`j@jysVshc`R?LgiHCq zsFQjHkzB}S`DKA6f0fns32IR_hCwpA0u`Y9Wp0F7__jX?>bAIQT^gn?vit7jo}4eqnT({6AV-K ze_c4HoNG-i)<^HDgxZP19&v1u(~90NS;GbJ#V*FE@# z$s-C6NDcPM(6FE{GH)-^$5{6$(Zc94b-+N{V6+2MGFy1 z7^aSK0Ip;^fqUB^5{yraQSME&exV~k)ux?Gy#RXSmAZRB2=wXk|FR3}9FkSwYR9*) z_rDdv&4dn&gqqqk!$- zi5D_#5(ybffR*N7D+*6Yaj1SpjB9 zoj-oVV6^sjP^Du;!0|>|jXwo4%4x`XHr`yU;MI4o&0i}r=QsBWX5XzQOujEdTz3Kg zSF39wamEPZG4hK#f2m;%j!`9*@*=&RNrYQEUKlsgx2Y)avQ+HaKkWuYGo!?8^P9bj z==b3d5QDuBpzX|16~-i~>|leD$Rz4S0G?YfS(aHn5Q=u6R=CyqB%|-dYg4ss-1na; zl4~k-PaLNeAx!u=kFGf7T>f>_`3=UcyMX>*??jmz)#bKSP7@*Xxb2tTB7ak+6haTR z%csOjEsF_yyO(7KIIMNYsjghQ48$pN-4>HLv@~=@YLF-U&e}q8LCGyJXO>mKQzgXi zbS=S^$L@m~Api5s6x{J?tT%3mX#aOLhw>OkJ_VEyPHsd6OOv;m=6LJXLDKQd)fvt0 zg?E$uX0U=;xv$QC^=G5`?D)j*AXCYm`2z7jUnNzjEjwYDXRrDYU*U0<6@A=O8&XVx1}1O zPO^a&G;xfnypn-&A654=;S#2zi>~Au_&^vw|1JLgU-}13V&8dOJ2chuNSa%(0&219 zS}V~%6tZw+b%AaGosz;% 
zimJC^VNd+Np!k)<^Zv-=dYqA=ksejo(F&Fn1e@MwB33qPA%>Z-uJ+`yh)qO7;u!zv z$VlGlp;E(Mpzb=|+ET{6b|awc6ft4*B-*6F?^smuSWI^PGSJOsWgVW-#C7NqfS$D$ z|2`-o&l^#$09!H{G^I8f-Hw#%mElX8h#vi2qd>f}49|a^m6wJRQ%>8%yjfYUe%Zy^ z&&Dzl)JFbHQ=Z3NMSA}e-%<{=IO_Q^2_(Mwsdz6ns7d7piKi?JstEkhh0Q zakE^#J(_0oYxk7y%We4Ys*|CMSg`{2w7dKY`yzY1Tx0K!1xs?FY^&1y|*$^_$uRVWt6d<9r$d8+gW<@FAx}<{oFF#K0aO@ zd?WLJC?;+}H9KWsFhC|9?~HS@XS1&9HCurbC9iQwfi6Bvn~kewpG>6F*sx~Cs1QwX zyF(hv1tWMwP@qWcnf%FVB1$KqwFkj&`h{nzEiX@O20T;vAM$~B1Hs^;GDsHHUwreL zB^dAjVoyKt$y*g7w4n-y_ubW<_y&b1B*(Co2u-OW*P7ceL z_g_-~O|k&0gK;=c!~afjl>=2#fC(F`UPLTs0@Z;Z4svo&y80Kn2V6LD^DVt67Jzv7 zT71^$cmIh#;cuYdA1WR@a;2a#w!x*oN7HbeQ%)qDdA~X1bi#2r7J_TTJfo`&QU``?GFTZ9*R?x5WenuCC<1D!`-VWGs>S&dwf1_`HI7WAORlADyPI+|44U zm6r~A2fmtgyBJ{Ylz)H1EyrW!Payy+{88f<6T&fpOTTatLDFQee&QJV6UWE4g0kk; zV8+zsr$Lnp$!4^E8scm5OVFAGjc1{Z@e zx~vMX+x4q!P*vj=IL&EegYd1@{Kxn7ZQn6KL1|%X6==gmdu6*JznzVT(&{b2JBQ0SJEZEv4ffYWerrVW* zLfxZlMwDM-aK`-z1@o8l3c1MIj=}6zesss+X`G>2BWjvdEI*v;#vPX-;}Q~F!qE6D z&^SNV5l(8mNBA2R{|`GWqA&3Noz1rr<%vVZ)V1UZ>6)a~`C0IHiAO=hU zuD_sw?r11FnP2>nyQ>R^?xs!x!kVpF^-@N%U5CcPlfvXr>bUa+HF&vKMB2xwPF3D< zC*0?&$IMUmDEv4xZTl+bexe;H=X*$vBY0jeTO7{+J!lDixMPOAVAvP_brKam69qBx zaZ})FD@*za_kmJT`}m{J;cL?bbuZ_}`fpmZ-K83)t8ZvB&tnB{XXYlWb)H#87>)iM zC7Oi>pTOZGXBK|b@HDKQ0?z{|)~FUEk&E~IA9tB8{7?`YAT6WcE<^a#DhW@jws%F; zGL7r3$kRPN5k>4QxzDLY_p1HI7SF>EgHQK)j9N`hT=&RS-bmaAQ{eTM#0Y)Q#kjw) zx;KFfBOOvT7|h@AI=xWF{A+V+k~^L(wx|aEbY#EV#Z39*gcSDY4W+lPW#HsnH{B6w zePHUe#SryFfEsn})?(Ft2(I5z4D`KbQ!VvDQ7$~zlivQ)5XJrsCcI{NN%c8bRfYewPUN%_VC}Cc_R%YV`Sso-q-5*tj$A|YM;#9 zq^==$p!VC{JTQ?lt-V9onLy3eEj&BZFelA_UOO2j^L|SE0YWp)GRwI5{4_kj=$6oF zTuTp8KUui_GNyuQNXF)We)&w@^20Vxb@i9iro2zkJURBUsuqka-Ej&cUsKvRf6mm8 z%Xn!qU2to-=cTE^zkSVr>H0LaR#<~H+Vqm57^g@$ry9^ZfSvf@ldJjxCjQOb z?d%T64Gb6$C+m}>Dun$DdnJ(mQRy*mS1;ZEWE~Z5l7ZXG8EMH0$(=G~-x2vt7%;VF z8x2j`XX&T8O~|@_80jSzr3DtX?^jp-Us8!TsBug%68_!5@7EoX3T~if(*5t?4oZ>3 zl_XnClg{DbIERP?eEVprBC2=&WC|qEghvrWQ^Wo<>-EY2&-%sdLPJwOJsC+Bb$6ET zhJD9nxg6w4tn>IKn&4p~z{e^4vwuMwAU z6XhFK@@4gm;N3UNdD}Zam`v<2+-!x5n{g@~Nr9T}Et|CaS%@~-{l_RW?xgX?LfCwc0r^<*RALYig&1ORmpcl2ACLxws(V|~ zjYzt|03xkSN!XmHke}`rFhaIF_NW4Rw^5~9*%xXHD}X#lQZA(-DL-w;(#Bxl^%0tH zz}ZN~O4ngNM(gcq2ws`K6TBKnBmx<=3(s`OB@Y(zWp*C5T2SC;|9DeZGJH1Nj3Gkg z`T1Syg}$=E%K`FlvkGCAWTF+$!<@qEcJ=2cuHMU5*}GAUhkO$SQlM}&$s2WY?jj4N zhoAF!V&JM=RIK(M>tB8dRf!yTzSiS>w;p zT=z!kcRZ*~>)(4Dqz#mbua;yP`uXp7In3o{nI+1x;~Mijt+2Ztaj1zN%e6Nyr?+%V z^>;crxwr`{SY}^#7&+_kq;w0mF@`n!u|{MMYP~8CfFqoG3#8C@4SNzMO~}P|Uqs|) zW#xBZ&33}kB_tKo?`H%O=em2J9zJtHXv2^aVS*Y^_^{+qP8O?Nar?328%r1~Y7Ijj zn^`s^mmT*xnU){fLb*669SliBZYUZ=3&OB+D8;m6y=b~HI`74z+RSO<@O0xsJ(}GA z7TZ{e?txHl%)rH4EU%^yWDv2 zC{+7Nu5$|-`AmCdS&%|@B$GM9zMY+mQq1^B+BsugU>n+^wD*O2uBd%6gR|EkXizwL zF+t;7MWj~PV@bM~B9~amoTtPCzOr(OYYB~Xzw^4!P1A~z0MpitDsfx&36$dhnS8dH z(d}wMak8GXxAjdw4FuWVaY<54re5WNwx4cdT^Q+enOTEew%Me6H$qMrP9S(?$>jiA1Qm`ya1Y%&mkIpDFUUz1oI^Ek(L|%--6bL%0trH z7)6t$jGrpxJHT(a_ug1YfIWg#R^H$U&AT9g%+tuDU>=fmrb~4Akvj7S;qPksV7*d( zjw>g$l*lHg9;KFw63?_1$!l98EfRDT3xERy;$ynjeJs@3jGF-A=3b8@hmr_yLIq*l zMGKH^|^$$=BYcL2TlQ&}Q; z8F&wP<&nyNQ+oh0N!y)PqL=!~{2;tYPPJF9xnA^5Orgdh<0GlsioNggi*Fz3JwTYp ze$5+)x8=H~xpBv5#Nro#c7+b7tL3!-uQ3{$*nbtl&LPVps217XqbXd+EJEv>%eN>c zX~q!dm@zKJ%v18(J=r8|5EZ41OO0S=!tW~s^Wa*>1ZXox*f?dMD1>P=njx><{c(mh zE3WN8UGMD^ompq9aDx|tRNUrYR3uby!DJxhB~_deyk@7B`{r_T8dewQY5D^EVt{7; zn;W_?la0rOC1*H4%{8QIs+TkYnJphpxyp0XHOb2tql^k^Iz+616^8GAb~Weu+5M9; zVvrtHzx@X~Ql7``&UAuuot09Ql-#OBnUOsywze%z{q_HpjhxU=Sj@L5f5dS4_8FJ5 z@q=eH7ay*SGdKLIA8;M)M8B@-#DsER639l(XG?^8J%=gm99Vn0F&^RjD{T9+a-S;l zww9OQLLzbiZ^@DV8TO=I+~4=EW&%|{dx}!r9<3b@`?=cva%T~X{3cTfdycowdvkt) 
z^vMSbx?Di=(>eop=?3+aY@A&hY765)?Y>x^t4b|&F~P*-h8F)CodjnAOzyl{TJ@HU zT;1WDhm1mV-K`MFmB5fd?8nEFk=`+jSFMufk3TYFcs4_2)v*xW=8x&DBF-aDnf}!PeYsxCfySPMpcYQ6*?=bMY2*+b4L5qiz#)dClbFxYJ}0hI8nr` zJ6U)3AtNC+3NCe)MRkV9DD=XQH!kT33`06AJfwSm{04$F-BkYpu~O}-pU~yht$Cfc z;zG2vr0}4=VTe|akkg}b%7XT@Z2OzSbMY2cj|VB!PK9`TFeb-AySn(G*M7*PQ)nN_vq1#O|PJo~ijZTQ|;%D+VvO%>@^Taho_BBw zF-~$rZXPdLe=SCcQ31WqvfjMf*gx?27ee;kKS??1VtZi@0NLS${CrV@aJ@0>hcdM< z73Zj^JZ)@NB>%3WhT*-46QB#!scX>s|G?x)wM2!VI$xAZGwh^#ypomu&UauX4C*I2 z_ccHEU^xp^1@w0F}qEWA~X=xHHpMH`m0hov|-&lg<4DDv_wExc8$=%b9a@Wj{0K;or zTSgx(QAEoKSY-bH(Ri*e(vGaS`DzVnZbt8MKlV4!7cwG~_ez^)dvI=7Z^xVSN}+GQ zN*%j>0nX4TFr5NRy2VR_mH?OD?~Mugg}7ECt1w7%`S&(@^9oL#ldRMI@LBP-)jY3} zP|$&9+=IvRE-J@a_D^%tV(Kj?P(h7ewVK~wwta$bfNo(sIdsBpNA0SPU_UCJ-z3sa zyYudMmcvYoeGFo93y*Y1*@k%~K?X>yTl+g%&9i&?BIP-UVV|?8guSLnZ?=q@5prm6 z@u=TSZ0j24Z~be}za^1?JF38J2!Nf=kr&i=fII5r0~gQQQW>A?7o1pshtPBqyZ-wF zr~yQ=L`4*Dy61Y1(5&I28+bWD6Cl0$#iGjBi2YVfr|?Q9rxfU)ebht@Cj_;rRCZi% z#6!~{xgd>6a4Ee%0x${B5hcpTk!Cvp{Cghvqkg#xCJ~Mqb?xuzdog3#?xg<&KU|{t zt~9K(_&$}Kg9Qt2S*#XOWDhXB7}%i+d~^8Ngr+}U{(J1l|HTWW93do-A`^~J-v2Rt zwtl3u)2~OVvqoE}FaOd5tv4S^bAwuV6zo+e6^cKzU7$k6nMU7>iDThKSjyWuP5ZyJ;EApp+75D8d0M&juoZ^B3%#d#{CPfg)~S1M4bMdE(;oafma znnHuND?DoNudGz2Byh~hnCu=n_FZdHS2Bq% zp}DVEQtu_Dzdip)!%{z`c=ffOWyjm=&tK}L&_ibd%HFJVi(LclXx}>%_fS{|BmCVC zk`{x7p&3(7jU#x2?JN$!abw(+)`p!?W?UGMDo}GKzzJB=vkbj<(-(BoJ^@?~I^6&D zE=cWFk0?gCP!L_T?c+qXH{8bnopNUp!rJb8beNk@tA57Qx745Qv>T8Gu`475sJg!j zYEgMiwHAd!oKYQu7f1cbZaLH6=Wa)?cFbXJ6vA==b*%D?eKQSHR*o=zKmU?b#MA7F z{a9^L!)#AZ-YB6QWG-Alw5pGllUe_w&}j2E2}oaGLnU!V?!^W>b{{plA_XK~?161h zggMuL7S{0pO1GOk!Z@Zp@p#2YHO5hw!>k^#a9vs9K2U#B5-e6_J}j21n%*-EY6K}5 zgHo5wV}hs+(AF@#-|ANn?}kKt?M?qmvel)sBer^bNfNxIfG+MYw@iOA*SmK@PU@LA zI*cDMH6HdUE5?!JNT>Asg~sySq3z0Jr5agEaCWH@2-aCVf3XNoK7P4RJva|!8CL0~ z;fJ1q!Npt*SMmve3*;ntB1}1eHYa}up5RmKnD}3F?LMvP?tR#!N7!%ajNESoUV_wY z`-VFU?BB$My7BkcZ}Ir1A83$SOJxWlt}8gUrS-6Fi*5Kl@h=8bNX9Hy?EH9-t=Srh zVdKH>`+|ZmS%(e8`p^HJU)LTos!duy#xb3^rB+f`JN%RRw3b~)Y6Z@Dqoq)I5mSNh z%+67UiPhEy!IGwCa>5>M#NZ#s0BZtBREWW63Evz{bHkkKV7FapMrDeMXP5IU8-u@dtx#X9+xyMpaA?bp2o`3;DjJ7hP|Q6AcAc z{KTfb6fDxTOTgiLP+;$a=977HJwv})w)St9jRnNyM=&GnCF?6Spd>mAl44pa&fgS+ zf0Jx+GmcB8lujfv`^2ZTG=zom&p8U`21=bqeC+%8!1fiqZt8RTN5y&ji7RTfpgGy9 zmX>qrLPrJaKnwe?CgPSzrRIKnCX%b`bL%@)vLp=Z zMPLX%hJzUnZG;7<*{bN8y}DwKG^@*zKlv@pOZ4f*cTOM(z=G9t4uBfbD%YCL1o|0= zE2Y1YHPmH2STQ5peo|*7=6NBr$=_}C0O#s|)9J-vxo|D!8vYvpCGhrwz#kpH{DFdW&MzYc8TdX8#{dC6_qHb#cJtjafqrAt#5w-ISQM7sqES6#C$iu^Llarrnm zluaZ8J+Q_i^KJ+n3eCeg!e~@dSSCUw~#yXq+FUb18+#Tf#U=DqDk-DlG zwFFaKYY3bTjYNG0ta7P8h$QGMm;c~3 zKTx574QSVso0@hg(^rCJ17~)+E8yw%ec?$mmTmt#Lad0MHW@1a{r2C+?li$}Z+VTs zQ|aFzy6xiE76uLl*~4@m9x_#o>j~8tCO^@}5A-O4T*(py)x18LN-m}}yc_EE!i0Uo z`%eklIA4`Xqug|fx2+^+M&`bH%RXw9;+&9}=#d`>D6#~D8b|`_(H*cdcV-ZieItP= z(V+gLo~Uh$KnNWWD3Dc*3Ll&Mw(#tE9&n3iOvr=F%gtqp6em+t>itdUFx4(sA?Jg* znQU&Nd@uJP_0BL&QHBY3iofSC?V+U#T;6x4QLX7*Mz50&)Uy-B>Mfb}j zJ&E{ypIx%~`{FT2-n0v8Y}p>$X@`uc5|x%PJa2#aJw*z@&zHGx=UkC$mL4h1`*8Cz zxI6yIf1Q?=*7TlBQL@6MV_$?GF84lQy^gi-M~V8XiZ@^4MKdO}{kGRuSq016RBlv7 zg~x~vwr`xhJuT^4I|H!Pz%f@$1#{b- z?{a5gw4oZ8w)nI{bk$xr(&I@jO(F5f0r4-;H)0UpoX65yowfxBLHQ9A1Vc;4Kn4df z$dcE5-4O>wniZ-Sw7J@jaYVf@RhS+BrMhwavd;;ztWU+&25Qgy^`r72x_fLG;!pZb zXg3*QXpWENZ3SIEGH|O79=F4D;+b|bq|^VfK7(dMMW%CR6A{2C>w8r86#L-?7Y@*> zkT=G`Ye+()VCZ!L#3}gHG34PTGzBrdR72U{Hr78P5bjvuzptC2+Ht+r<@jgFZobkz z)b@GiV&EH#h8a{DBBJxB z_ouT)*XO-R0Eq}HpEzCwHL`Pl2(Gb{@n_*}i3)0Tm}2xc;Ld5Thn^5@hu^I^Pc33+ zsl|8^k1}k`c>^dvKYaM!b?|(%!m|bK**0DdW76S|pH@{*C^LipH>Q~B;QM%Isgi4! 
zlJ~WC5ya{qK}(?B^g>{cjhK>33Dt0g=Q1}D>3uAMemrcUYORG-^L8p$I-wA+F;7aB zul9i^PFQ^$?r}G zKG_;wA64L=EjBtZq`KJfGeK5fi znd2|ld~9J>H5I0ztUFa8t#`3+EQ)2VYG>&yReu78n-=mQ(PzZVuyS{Er~wDvs#ApVK64U3ZEj`8`X@-1CcvfpiI$l`cIcDfdX# z5zedP^>>-J*I4K0$E7tZXjJX_T1RimPn~RJHxp+*(%qvZ)MPSeT(1@!8UH-NZ_A|EJUNd0iA+40ytBIZU{bOxB#-f=r$3?GU*ZyuuCA z$S!WHg6$D#h%x z8PWyVKc<45WMvPR<(eAET^rD967)3nTE5_}>{uK38++fY_cg=j${E)t{P)PbIjl@z zbhEr-;@%HYLnVH$$~+nqo)sm_1C}a02?xyee4zB7<%(kPJhzp#$+$PL zVz)2*%xtpA)CmfF4c!yS)d)+Lelt(j+9O+QiGmOmY5GjN#8*Zn(oc6)Fm!A#mF*13 z<-o1a<+=I~z!`fQzIq7!2)QId7lLhmy{yPPAaEUZWqj*E^n)|Xkx&U2jk-$OXj{vV zk=<+K_a03oQW@+7NDE1104nvd%&>}uB4SIYqo7$^~*G|EVZx% zgBy&N5G;_25s*#SYh}i`COLN6;1}rUEtif?r%C*H zF$1Rx%`s<&XbG0PZHA44lvUJt&n?N2ZgZb8{oRCn8=E^X@w(f)?ZguxKB9B@^6v4s z9a!!CwyurGC1{`7u5bs{Nv@?G=#Xh*qf4JrQ=L{a(hR%{M2}!1E9?*dhC{z^HAm|F z=Fa6ioa)Q#6H@N&7FEgAqR9IXp8VO6xEaj2cu^ejGP44oZ1{x*mYtNA9>I&HZkmjJ{GL!#h%CJ9{-}8lQDQ) zzh8jH_>er ziS{J^qw8BaH^^_xH2kzvV84y+=Z{AUb=g+Y2DJZt$jGQG3G%qcwN)p(!iKFTCS|%V zMwzsDOFjQZnp`012*ec%Uj2zj4)&v_aSR-t{MjL=ldfZ2LwwNwisvQJH9gn-0mIDV ztW0w>Vxj55RP^3u=!HA>g1TG8?)0nHplkiBs$T~43ZC)u3tAegeC)Y}jZ@O|bjz5Q zuK-STVB<1)3Wnl;GRBJo1G|oW?9n%;mA?|boZ>4}Q3UV|2T`qx)xpDstd_(z!?Q&p z`_D4!Wz`*?#t(4o3K%bz#*r(*f?vWo z3Mso|kg@H)E0c!V>dLsSm_MBIZ>vLuS{jB)HY}srm!xRny07J39Ihnuo}r790M`8R z3qS7p@i`@KU$%5DgzFspDKP-r^WRr-v$)kLK~oTk%WnJ0X^uAA7?@r&u~5e!0y?w; zB_$=5s<}=lm_Mdk;BJ_hn;(7jaemNNvKA}=&vgS!oA6oir2-ByqCi+#iPm;L4XHX8 z8R*6j6YcwxQ@-Pi2b%m-#pe`I5AsZLL)T{B0hjry7y9+YINcGYG4a05txvd%m0Vw< z=zY)-^_54D)NGAjeO+b)|=UXj%&QZJ6C-0<7bxWznSabpmZu;5-P?J z*B>>Beqd>d5rkM0+YEb_HM^H%vee0BW8P4RP_v^`_M!0*&Y(|v(VILxAA%c8@2n)z zMFO|St@l?JvCX-jm?pXq-^YVQj5hH*)lhg%O1ArPzhyUz#B1vh=#v+Ua$hA^AKCZe z{rd6A*D@(~H;L(jKW~;k_nQHpdpX1%7mr#%nOff!i4RlF^@p6_7i&;FR#X1&epj@d zjr%7%{O=9EM&zNw&vJZF6>jME_n3soM`)8C6g>^n7BOm~(Q`Cmf7i%)!0g!bnEm;m z;_JBAeFWOag${r|Uw|@fhkA94@sp$-Fi-l)qYEqbo4L9eI*WN&ybRw z&bBMTmea7*C=9gT14YZBQtu0tLFX|6Hhw4{d94KXR5gp0S2l%42Uj!0^W{*WNFEqb z_}|5v@@8q&OE4PTu@o+%(+vcVi{Gn7miLnfN*q8sBtZ?%W-hyl-t)N{&2i*Xn~lXf;hVu4@xYi%nu|s$0N7?OcE_Nj}!6BaXrS zldV|+g-faNsBLCzMq>R(BLd%I5t}8PdToK3`2E)_Y@R=L;kp0JOW%rVuzCCledQxd zkD>9vyeNf1Il59EL1F4NiyBmfxnjojNuD?iwO|QILZKiKa%b@TR?dLrt1;LIKc=k- zB1x6jHAUq%Ts&G%d%qnMVNE6aLCbOJp}K^5BderAot2~m9iIv-LE}vbV0(^;mE%A# zJZA7c>kG~>kLtMw37df04mZtTKWfxcRA{atGIQ5(EC57-5ct<=p!Se0)-jPn(7Y4 z)wAtC`{Sln!~w1G>)rlE+nh;3uMJtOCirGW!jBBqoi+PChK^3b*1lGNnF^o0GHuaxp?NUd6CZn#$ENOJ{UQR zp>kak+_m>F)3fg~e(vC8q)~yAX5^T~zweLs^;=v1yk%jMv~9lpOjU^MQbXO#k7;4Y z^?6lmICgbj%}YHFnMu0*m^nyK|JbO*8n{tj-Xau{sg*mv)|-KnZ@I(!L3O>wZD(p- zgA*B(jo(RZBIGEi6_tYA^Bm;?1NE}9wh*Ad>=3&Rz+h-QqxWD}oV?28^8wE!XrFd7 zZuS@+{jL?ko?cH@n=c3M8FzSUa;`}MuQst%)KReCGDABSxW?@fzCuB&RabR& zn_KkrsKnP0)}^v_d1^NCPl}74?h0n~dtfOef_V9F;oOXAOAzJ(&MsOh_0giU#=diO z`Epiq&Icv4>U7z3R3AGA&$mjr<_56X#49o0XtL60@rzgLtH7s$te`o~ZPuyA@o z1ah_;C5~2KDLJL1kwMyh$;*ubX>l$4Bq7^f;rK&WoAmEa|=8g-*2oh(>%3DmU)Q(!GrzZf#qOar~KFQV-PeNJfJp z=upc(_f2&#PD0b1r;Sx5ZAb{R%}ocH0&I&#c_>Lr`_{04#rgt8oJ7zjX+)at<|8*? 
zPmqr?rKM`~P%EqX+ZXF3bfsnQF_(-)-JeCi;#3VbbnlfmtFy( zA0S?cil#eB5=f#&sx6z5qz!);W$C&&lZ0=p+Y?jQs;V8xHo=Hk+TCL57=wq@T5(a9 zD9gy&ygbZcD5VpDv%Z$7D7}wRnWoS-Sn_ znyHc_aD#IXSEff~DijA)#E=AKl5)s` zfZR1PGE?Cjb>DDqytVxK1bk7{Co+-(S`Me0uWW_9;yE0$h^73px6%Ab$VJbAXKnHd z_Cf2Z@*kMSv+^hJ{dc{T93Dj8d8(*uu>OMqxbEL+pStxdNfz+Ap-Y1{p7VJ5lkeKZ zkD|)BKND}cwLy;mY|wxR|E&L_P43YSeR)RPo^l6&E2}mV>T{BUM{b0y3>lKW3A6HO zyH6iKKhdKOt?Si2SZt+g8wE&_eCLhUL}LviE2n~voMiG?l zzHlURheOkx#`)^)I)WM7N_G^(lM|0qgX$Csq9k4SN=%}NpAPleIiuT_l;^Cma^-s5 z-GvCE#udEaP74|W+suIR9+jSGN#Dqr`0+i&y&E;kj`;CDWqMV6XBpj37^>xX(bKqL z$XYOaB!m6G@bN3%KB6gW6?!nl+hCv$J)7*j77{?5NerW`6eor{TwE|7ND$DhJK%A; zL-LK^bpN03`&-W`FJVMof_(MlkB@DmqLicD=1&$aNt4{Bw5KyRbKjGW(~#dICeI*J ze+5zAS|aakEc?!Oyn4Zp7O4-rmKRv_H`T(`ezt<^cZ`|}Wq7y+g`>xa=ceFx|6UHj zt!K!2Jw|TJ;gNLyhK*l=S0zY0%QX<1OQ_0?zUSA=*Xj<@N1c7H*P9k*rOTo>Gp^&J zp(-XYJegtbi;4q@5N}6`KP0w<(?JNh5247QNz{@kxccUiFflk@=FOPpv;2;qs)>J- zIjof(ddp8!$9ZGc>yPKrydqWp`8XQ%lm-eynM6^xc;vTVM5NzoDx4>rV$H1G!bq@n zQaT)92|Hw>C826%_qyQa9hqp<)*Sps*wi1Yh?|jpd!h(^Cr@~-|L!Z^mETbWm(=&7 z2ta^IaDiHzlYVJ+^&y&ye4{=A5E6{$@~vCHGxv1TrD7z(8NJhfp?lYAph!8aIOlNtcK) z{3{u;!vVh7C!ki9E->&525~&4t@K@xKA^@3#xpxd_1!gEl>F3(hFG?= zzk!Yca_ZTYWs=3lix`q`?Ftk>r|p2~5+v0h^W440eNhNb*%^zG&6qgTIwdig`dtvI zbC@z*b$!*k^YYdy@=6%&ixU&YlCpqmMq`DszHIr+=5xzCrsxgRXXuet%A7xiQzCqw z?xJ)_Ve(Dp_3>bcz2I;)boo(>@;OxY)L68o?0pUuPGEcJ;T5)ot^csdn1-DE>q#|3 zo9CpTr&?aX;2t2<><`0z$Z<}^YV zRJ4T)E7Am|OHmZ$oM0yKVIw8~m zDgW`_`@fktZ)Q&dXLj~EIaz0w?^|o_?ry^9Qck4fb1U51I?4=-Guk_?464?eo9jFa za@(DnD&uPY@^nGJyg`^br6!7Uh?9#iMv<7Il7h3Oc_Qe#r;>`ICkgQaIs0{cWFHNu z`d>e|*DH7n{E0>$7x|{ur=LHwB#v7)2ZO+V7~pR@^u!;%EE&^u6ygKd8U7DxZa50^ z?Lk3znM->~o>qW$qTs+efL1+LrTY`(tHIV&=EI(tI#Hqib#f=L;?DkrP4ag}{tBol zw;aunna=bhF{~P|{O5~uKV)t?Eo?KvN2eQst<$1etLc#?#<CIaK`bv;9MWY`RpLHbAU-jTs zH+QzH^&~pe7v8PKtG?xhyaOQQa7%u)i{Erii`QmMtsP1S%ElDSBtjQ>hF7Obuh!U2 zz3dod8ft4^tgNZgKWRR&@FZZ+FAtZ$V?y=Cz5Z%@+Duoi=4PRY*9e}YHlDj(?_d5H z<9O*l4hQ3duFhhf#xkxbscd{1?vuN@l&~*XGlkN!UY=`k=?kM);2V+MeTbVaL8Ge5 zdom`*@>QOCde-vYvk<`Vj;WxF!+Q#@PTLHuf=(w3^xz6Fb;k9I(d7l*e0%=6^@Xvp zUrlES62I~pvIOv9*Nt@dpd&dJ7airX@PAMd&B>iJHqB)N)Esscf}1#(_~gGDk5qq+ z=05yEha41Fz>{Cbm_u(qVTPmkUtVKk2Hw$XV`japTEMr9b7#8I+|`9~3rBv_*Hykx zNl$y)ET^oaI~E>3e|68j{k@=md3kyAivp9H53iN$Zbm&YzYrKKx*N4yIap+oZ`$T8 zP6_2M-8d&%XH3@n%;-t-C~g!J2dcM)_JcDIL0OU-C%e<5?S*&Ic$esNcW!Of^{j99 zIkAb3s=$Wav5(S;MJknLq`3m$Kl+finR2bF=SJk6mUf+L-)Z}|tgH&RdTWtPNyDma z^Tc)EK}Nw%e`^&ouXF|Q9|x(wi~^2I2zSeS8C;n-XtIQVO7 z?LJZ$E65gtL#uu34fRLGq8h7pSYalU%INjf)LzymBu`$TZ<%?^s#YtbJ9c;KA-%{G zu*t-pc_Ky%>CONOGUs3mIJEH$Led!kJBP~R=dyKPF7bNxh7mgT7UWl{(0}7HdW&yQ z-i5@2cp$;YImqQubdeS#JAUCD$%u15;H99DjaIsG8EYbl(8OVz+^mK22%~nZF~cHv zxVa-<9(|#X;fDMgXq`cUs$9Mgc=aMc?Rs(d{pDZ$@xe2H*dLpyT{M@9`Rr#jrr!!)9mKTLq}P!I0D=;FaJ=>f4}z% z)d8ANfyIIeRw*<4y_HQ%WxF4LW{UODEbhNH=^RSD|9JdJ5~G#=ke2t8M<^Gj)V+=R z4k^VUB?ZNpujW^yP&5L-wp)Jzz7Z-8c5$>Z;?Gp5+6z>?9)>C<>zv`tMV=+H?O#}C zTdkQvg1DDL&;ipf<#)jc9v)m+tkMJ~HZ~^ga`}8ao-kpgcl%JJ!W*u)reFT%{1Z)S+K&yiFar!;esll26AU-q>c#3Z0td=HEKtqIOWH zUvzU4dR?illxo(0P}olC&6LJLQdvs7dX*g@F#Y4`ym5*b2BoO=atta+;*lOhg=2&@ z5cA11>;&tMkoZWsl%q4&)69dAw*~V}+#CBZ^dvPn5NdY2n%s1b^4`o60czhQ zWWoh&m0q z=&2O{M-JSPmL1_pP+S6a{EG)Om8@Y zy>%(Qm7}3h+i~$Mh37&>C>KqtP0n>$Ji$`<0-k>q-HDgoKAA3ka^vWab;PNdBwD|k zmz`bv4_Z`=gO_jXR&)SVDz4cjyg6`vF@-C|;=v#%`^OOWx*wdJaAz*3^4jQ{;+3>d36Ru@3c3kkuOwfVw7JXn1t=144$KJ|30Np9hOw*#71x z*n46`ww}af=0V4!2XVE(SqP0n$QZ9tdAUOpJZkF;!R(>+QI?k*H@7?uY^*D%w_sF3 z8-MngXBq6+an&k;ukOSed+)s?J{1c3Na$nAtsbqphJ*edxCQ@D6Q z$zx)C@ASfbrbhYxDHuVCRkBU#CE#GV$tPacdA^954&g7qu5dEv_^_*1GxtnYZ3Ll1 z8}no%n@|%=i}mdkiE=Az66RYi<1W!^A+_v!MRlILG2^44hB=Q1Rl9!{>BMw5n+I37 
zAH|%#U!*V>LfdwCQQZzNRU5zISzW}2{~~ELWgwUNefRelvI8!CH#XRw=c1|(&nK^g zmg0NSgrRZ|w~5h(^Ua;hzF5XJDw3<#ldy+>-rpd53{rD)D)n0Y%w*${&nz0!c}>aI zufJ@vJSDQ`%P9#R(79A26`LrB!NL}r^v-X$@zraMiDGfnyl(R#3cH{52;9DVpDim@ zPjE2)xThYpH>5v(I*-JedcXGqAHCRRxlnyk*Pl2WWZ?6NHhI|>cjane%%y6}vF#(? zhxI9xXW!=-)|gY-X)wiSyQG}V^`>u033O!dWgHj2A*rO+F%a1jb>dCy-~= z;U_~}9piuy;uEL;h}aXYib9 z(dmp2v%|NzHCnE?KPm^EA4GS^6)_FhU-gNT%=Z;Cm7Z`8LA>+%A(`u`^Y@I3n5@`) zFQRRL4$mUtM;C>Q_7ta^xfz9vKo_{e&zs4tM@gah4wn?3xjT~lemJ=e0^V=j%Os%c zw->uSug0ht|7)f(;g5MizAsZRzKs;WWdg45+X$z;QLzB*FN-*gXM0ltybwP;;BJvVY^O9DXRIkVd@?a2PGadePL#%|-^q^rcznF_kV7-gcZaK& zKf3#q;QDbslwx~OJmNA{w~XoQC+)^mb`trUdmkeiPI?jCP6eRyuh0XjY-G}c)b{j$xoI7$V|-i{ zbu|hE4&~XXz?ENRe`v|kStJGEb`=1Zi3MEZ2sLAYW9KLkHN4tMZ~7K#{(RMGQs>a% zo8SzEhcjL7f_mB}B8% zPj7p=S|OCNQWwz&+>nn9EAqY?>|8E{Vpm(0wNXvPh~H!9ZN--L5SosjifyWXo^2R9!a-+VVu5F_weCm-^x6`r z@6)W;Nxb|!8f_c>dl+#7km@uo|u{o@b*~#OWucecWT5N z8T}lvXlfXPrL9wGZ={E@%XF2}!!7~}f=?M4d)>Q9EuwaMG2ok%gkd%G|NZiE^?FCu zfX_eIZhFK01d#vri&wD21Z(T}54=DIFgJFal|rr}`gg+eL_A#i&3orkSK zL0=ycFSlZBT6$7PVdfEc2VI2ZZyVD&!P@g3p+BmDymP$#=cEyJKYJE*kqRF}&py0` z)LFx!yr(2OL%5WaORkPi&}{w!inJ7`?tZP~`}jTn4lxb)6pD*`^z%A08ahKVVM2pQ zsFz`9)_6|pTLS)>`?j9%Po(l3Xxx&Yx~V_JlHWx3#fyR)>SO|}uZs-HsOerNO}rO? z&$zb%}@8%*u!vM#WB_H0RlAcoNwDre?YnCY>F;4!( z&)%wromX$ZJV5ct{Z&g@y1mUel&Ix!vVLLsOS(}2^*TypJWxA8&rrs< zg2t{tDS=hRw}zkf#`YtQevOXl5oFF*khso?!q`=z#RfWhiMkQ35&Wd+90}igXOSOs zp-K=se(~oL)r)@tT+=1{R#>`%>b8o;<)O;Q0-nZ5B@j<8yV1l=4S&vA9%b3&aW!_G z9UcYc4GoQKs9UPC!f&-yESDun$JLNmYw}|7+1C(Vj@*YvHS+jonnd|$ml!*HvK+%q z%1HHaJ@s7Y%~V39c93Clw8G8HLgR9*ae-dgekm7$p-2zdPt-^>TI5)-XGB%=d-2J( zD&g3*fWQ%CS~RK|G_nqdC7e7SbL0=g`x`!*Q{RBYIQS9jjZ7Rt$>SNoRoOtJheUX{ zwM4Wcpas=j7a49rB#it?odPDC5UI<%TP}sw%ChmUHJSl;UmND-ZPib|ekwJS3YOy| zv_5x}?->~tEw58T>R*0H(`+#F?S-l0b?H4o?brbr9jK zQzK1wnhjY`ST!fhd>zxS9$MU_mhKQx&~UW$e>kc;K3m^yQ&|RcPiP_{$j{Jf#+08_ zKH0hYI1+)iY8i(V*IXi(IIhXv=%OlJlE1dC_A!!`6a=&PWqWZ!|LwtbKYqo_n;QlW z!EA-_abdEwkt@jsmiOCGm+4;UoKL>5DYSTev`|>mf1xPm3% zaTJ`98{uEcw(1%9Z>v+oUGNKGcIXCaGJ&em?u|^YryQ6)C5p|4a=q2+Mu2 z<@Z;qgd5cPOeS%AQJLcykeWpjY>*MrOdauRynG~iqR3?X14E-}sijXgc{*2};Wr8! 
zx=|m?TO9F0`j+PVx(*s_ff$*y-7hWW?q)(7eIoqA6TzRmVDUf>|K3casg-}x$`4;x z%dnsYIp6w6R~3KyBA7mXy+HiY6;#K=&tD%AK05ZzyLZN|r0=0sn0YoVttM1lXKU!t z9JeN(GI{@D&XEbt?T49$+CrwQgMGrAcSj7-ry0k6)v|2w8`-AuT8W@;e*RnJ$ogG5 z*(=-Vx%ecqR94vd8pH&b_6N+j_ZIVZg?DvWMB#ViJn6Zg0-zEcNIg;`iQ{LCH+8XYLs?pH~O=jNOdwfV}!`7@HmN9E70u3H|};c$?ES%)Z}%!TN&%u(3z-(TK1 ze@TO7W0?@}fz6|<_X;_KgB@qfDuk&Z`7knp?-VGB6UXb04qX?l&YwU;f0Eu3-b;qN zCr+H72)zRj2mTaqBFD$c3OUn@V7ShzxRTa9w|281;ZX=kvosZqCdo z)v{8OarG1vb@8mpZ`t;t3i;IRaQocv1$8z)!7DY{Vs?+s1Fuo z)WFwe2AxadxIqM!!dK=X?hue1k;@IA0^cyjBYcJh7)7;&;LWN9x4c3+GxOVlS zHGx%bwTdDFhD9?JmlA%}r@k23YpQp@*pib{(5Axf4hg=#<2hLrqc(Xd-snpT$$;rvbL3r2e&`(jH&rF*XUXl)u ziqMcm%j(>H?QW*>FbGrBy}eLYm47DYM2-M_lgsxe&Uy+ZEG;>24iX^?zE|}4TaHiG zSNtj2;ZSVy*3>=}=WyNUE8LT`lS_YxLwn$6>u4?w`+Uj;CF5%>wJpDmd~f>_XSJYS zTJqoAJ(JN zZXo`v6A{`;u+P+XZ_1SdlZAef3UIl;AI7f0Z)7 zcD|gSa%*)bUnF(|hv~6{1afi2>np!{%OYk46t~8;>~{{`2a;e@#N#)$et3XRi!Aqjc{UD4^ugI zS4dLwAo3D`Xt`&JRZ}?=xC{yhAFyXX3Drn8k=7uga7*qI-}ixxQ%`V|{Z9+S(cJje zzF09=_D{KUNqZtFWDO04@txCya_aKpo`~qNzA#wwBh&KnJN~Yw1uo;}$$ioq?KyLX z@ZydGt;_dwstxh5GqY^E7>*h{=`Tt>wQDFMA*=J zWbdr?NUuw%=RyEwME;J)%&13X_mKlUHaJN01fwK}-yyHHU6cvH#{Na1mJ{!LwuQka z2w3XI-QdBC@SJ6&U-e^pzrR17Simqo# zBR-n`V5R-`0^SMa;$@*rKN*%5vaRO$rOVLBvpY-cEPm?a=6-p5uQp-1T!J|y3XJw8o-y6kv@C@?c)A_Xq@WEm^Kv_z zDF)H{A)4DF3y*HEUD+MuaOiSY1$;tOdY6p(9or%yp>X#h{xnTMdcuPFZ|7iw>&e6 zQG4Ve5@s-Xh^GWjZEWf|X!<&D?42YDNLk_z6O+r4DJw+9>ZiwG@4yW=RlCY9mh68pBCX^+Msv{sW>U>u zrD6Te@Db*2x6$kxTz>cVfaBT+_xsoc) zthvwDk?aq!1;zz?>}!zB?A%_-+B?(DXM2efrU%gSYCzvU+4KqtUjFQixT#LS%ea93 z#o6AHhe-6K;0mnoY)~-AzN8ucBHh*QYnhc zY2ijf6~0C7L64jirJwsE)lzlzN$WH#8~YknpNJ{U02((<1W>Y=`wJLR1N^P~%Tu>B z0tqZCJt)ZXqY#2~)NYi0{pzp3L6uowG zh!(ibXV+u6bz9zprJQ5m9gLn_!!=yUPxseY0R5o8v|ykr>l{$}rbeVYOzd^$`M!lcrC-`6Nwa!Pxg~LoQVvrU9B7{oq$WnHOIj|NZrvl*ndw zt_f^>ONdodj(xA#RCxc}e5YFupJ0RtcxM;jzPwo1H^JUN^!JU-&ck+7IVrqG_GT}& zj=6V3-U#tCk*9^Sj0Z8|zxEjE`(;|<+$2oIJGCX+ z`ohsV!gll8Y0p>JSZJn*iPd?qr@K>jTd^%DL?Txbpwl=t2F|N$HMli+W*sJ%b|SIg zo1Q6Zdm>e%%EXFcv}0UGQlP$*DyHs20r+{=t5Abq+C3Ab#dFmWOXhRaHc0apEz}DS^Bia{k;{wR0 zY3_mRBNUeB=0Hb~ch)YZWvn#L+P>hHD_o=x8j09-Ii#$V;2b|7C>jc^?7jLW*0_8O zy7`NXgn-el{H6b(MBL8nc`_G6Hopj$kyjk7Z{sh6zli}NQCTI|qx+^YZ}gE@t2eC_ z5>}0y7z$u-UD0Bl9|}(EVj*X;ewX|Xf3aCxhvT3b;iu+hIy=DqMt3X|&7Iw7bwt^? zf!BUYjQK9q4v?;aWEiPji=}g<-}K&{x%lh~f#Dj?8U_$;jiw78J-0E+`KFBv)vkXYleo!PdMfH+X=t;9P6su6(9P)zxc}ZA7{H zrKGr2H?BIxJ0p(OzJ&Ha@k8z7wQ{aSZaXp-XX1Z8o}+6gY#H_qd(U9z+t*Amtdl1x zgh6~gmFx4z+i9j{0?I=(LL`R#zvu3@l$kdE#tUFS@HBFj_F>|xq5ph4!sh|RZdF@v z(D~wK2SU7kPO%P3kot?;r76(WQLXD5h>j|ti{<0tWR8NU;Il;^<28MrlFtkNP9w67 zE1!2UVP+NSMBGByML94L&TPY^zBat=%P7To1YgHoTS2WMj{lbb2vow^8SxNfm|yXi zwrw!Z7|?(S-w0U+)oZnb)FhZP1la*7tPRxYY7rsb@L;uo^0k_3G5UPRy0F>{U66Pm zdU}DHX=EIYXO^r1pPl~bdBt(wuJ`wm9hv-wJMYUy7-7{To{*>|^6XyttIOa`*?4yt zzM)Op3E)8koa6c^w_CC39-q_6X@|p{tE=;Y>iOqE4y}ac&VC6_@dbi_hTkCOnnMZg zb9tD6fv~J(YT03*R2J1INt!ZspKrH3)En@jH}3}-{XuWvB57S&^gV69QBc?`&%RNZ zs>>+n7aUpDwD2~z5&mc6K9ExYid}32$|HBDDti|1(3IWW(UU(l-ZRRby+#}WSorVJ z6qrR_HDxv`i@_f0vp%cS>T=1EpIIm_H`C}@ufI`hpdbe7o?`q_?}yX+UE4Vqp58w| zCaM;C%F;dBZNAYKF6X;PQ$3o=fv@0sPKU!^-Syn79f;|X#acZ%M8RSkT&8*QuEvT9%0)g-sQ)~t-2Yo0oF_S8S= zn-eD)yN#O|dwKI?jAi;0nTXVoy_f{CRlRP^U&r2f^jbvMZyB*pyEsg$Xy*lsrycek zhsTLd9E$k?o@;i+%)}%*?`Z_Ht}(rOwtiWk{{0va&WK0IhW=5sjD=t{&uf=d(`=8> zvHQuH%W>}KJ^u-&-RG$ku*qMZMF;lBc|xp8MWnfBI|!l5%dfS8h_L_&^D8QBvhwY? 
zZ?7?(Z>!01R%BwE1r|A>91IE1-&0X|;x|^h-y?p@*XaT4_s4kjk3Ct+y5_OemGcME zourv7n;U1va;W|_&y;;c{@m5`@AI#*!qy{Qxl|NQJ)^HVS5pqW8AIa{FWByHX=B%Z zB>NF#1lIKl_#hoQcWikX!W+{NO@BZym;rkqjC6zWF6BrVS>dT=zcPB8_T6#~yxhJp z?;N9uUtcRnBe3H}t+*8gCgNSt9OcWNfJeu+IWz6G7VVcq-&Qee3BMJP?hur=>O-q# zw%*%wxYakUY~(>*0HzWcu?iAF>>1XYeyA&(?|A+*3&)t}@*D8&ddWcTTGOYi60!_; z_MYVLS|lI{zjn*}Ts*94?m||(fD+6B9xQ(FAQ-RHL5R_c@hlMfg98Z>CvS+I>ng>Le;olwvU$r4g-CX52J{sM}euewbmGaUp-lzKF&7DV3 z6bei`p}3SMrkef`sji^@Sq2G}&^5$#?yRV$;ct*p8JlgW1AOJ#Vy#^pq>}sW0(?T# zS8=0bJ>{SP2>6@-aDAY@JQ@}mRw;GD(mWeJc?KSpy?Thdh&)xs)@+jf@sq=JlrQrB zrPv=uN)>7qU+JNuNR6K%*R^n4;Nhc>7Y$XN+z4{cCZDK!-}m zGsutsQRAIec3A6|6I*Y^{^kX8x{DBUUvX8Sh1>{ull&}pd?%03uGQP=idPwhwZ@#0 zO+W7JndaV#4+~PZiJTL&{{|sNN+C^2uuQMx2yz8$w08+5fR(+d{xkYWsteifRCG{{ zJY@*g+@AVX?hOkgyRB$Bhxw`-B)JTtPHyA>4C_OD_rKHTB4a0)`Q&{orSR6>xM`7| z2FBBN9%k^~3=pSu3&@`@(9glnDr9sJZ?U*5$UbYLLADhW(?4fr(^mMQ|mF%+%nGO7I*21(0b6XIiZTxH$4wrCLeO+7q zV@OSSVqamEhSoZkh2=_UG{Flgj`0wcSa{;?QM)5@VqNp1lk3NP^f?lyXR+HiC*{)D zFm2eDStusYe~=n%R1e0st;C?bTm1k#GZ2Ytqh0t`}g(wOYyWox-ISq4<#c zK)>APV_|xu5i@4Wy^@(GcTi)NsAqxKmUNj(=Mmg23Iq$+H(c4Q=HaBedd!Mn$Q&iK zZ07x8&N_tQ$`b&Mp*=CV-_KNB$7Exs&iZ^2e2zU<4#FE1Yp0ts5rzb-CxW^vTB~#} zaBXWI11RIR=how2e_rMH7uHL5x}Qd%_qBg!pml~YvY_oev|IEe)$srKkUgxsS`(iT zDZ$AolrG1agf-xzEfl9NH@%}2Uj96qi&I%MMORlY@$)zZ8T_C-t!e_kgTw$)&3~mS zEP+kl0zd`)Pfn*6&LIoa2LnTDt?pC`;Z^(s!oK8IoUl6@t`YsE^Dirvc~fR9yfX^F z{TX=>)zJODr{<*%?Z|qY>$Y-F{A_z>QH=#i{qXk#)C2~G3ZOcISdYr_##E~xmshuT zf3L>>^;$$NKKLN@2+#Xffm4>vGrWZ@mJoiZ4_Lb!x^GH+flxQvC&N0R_CV$j1~Ff& z8~G(Bp77%VjGO0)H<16}A`y%S!BjUXR2J1h#>8;&e(>kZYQ(3nXtQdoQ26t^1oBPA zi36+ZtBW^R7QEW&`_vf3U6-7~!N@6)IOPMXxM~K|1}mYj?z}8BuPvDSo)}LU z0G)F~ro?Y0F>KVn?4#IoTxbsLyFtAm@^>UR7P2R1fU#_HWduP_-oN{s<=AWK-XY|^ z^~6Bh)tmu#+T_wZQEJpTmS40Q!DQ1_qw6wV*<;O8lN!Q9vFCgGZtK+29U;V{{f^nO z2KnyzcCEuOqqX~k3hOrA_M;t_)hb%QXhDL1QS*MI(#y+}>Aqr}oQEQ2pK(VU!+jeW zkc5?@xJg~>S}+0CiR7oA27tPwURyAsh3|(5O-I0;*l2nXZ7fqxl8z>ib|!nPA4)h~ z=&Qa?)kLfxGRW)+0qYe^La_`(_c5(D+a=O&Lf*Y^$s-LWh(!m6uSJ@FIOHage)^1Fkh}RW&@Z@+=furr{ zI%+9GosPv&Jz4#OHs|;tcN+BQX17}x;i(6=TgucA72Wq%;$&%4oHw7Azv9=+|CeL= zkG+ATZ!74iE~K7)I5sr98$c+9S_E3Ivgr(n>@4arq@5^-W+-o$=s!+bFyX4^idv+3 zTsFcD=UAXh`sq!jIR1=H{MLHi;t)Vu05{3ZILtWI}aHSU-{7 zzAm;}C5}EZGU}}>8@2C{!<#{%7T%9Hwb6}F)MazfVL`aojn&g_k*F&ai1%51yNY(; zsek>&TIA?yay0de628kgYphRs#`5~Dz51vXg#4O=frYHOm1mzrL9-(@df}T7;v?8kTur6qMJjr;=7a+stemOcJEvNK37 zf$7H%8QZ(A>NlR)%z7S$_WLUmGhca)ESSJNExptSj`JK(vt#tg9&JQZu#h^#m=U?o zXqKLj4cD%PT+>ihG9Jzo4Xzkde4xb)DUL%GT43vV<3NIvV?XHnGx*o!N*NJ*Hvv%j zwq)J<*BBsc^<Ywu6CTIW8tZ zb^T=lA38wSzV64t*w=bg!$H}D8O5E1kM(_5O&Pa-t*5pt-ZE0Z|MaL#oVG)nr9yd= zFQtXbwA{{lXH0L-2W->XfacIPIFB6}(0}T|rsA@8<=;Ol6s-5CVs43+wuRs}c!Is2 z7)@Dj#MvGqxpDE0q%3Sqd!pZMd*;bUEEYQ9Er!o5_X|9ap6_3kd6X35v& z9``#!-EJ01p)`ma1W_VBR0JrZB zP*DH~on` zTik~4V!48veh>M!T;7j@Ji?mK+Mc#Jxscnb+#l|=XX!awc78XicLi7o84&2eMS_+V zLHN@P!$;Zu?Ac5#6K_b+@Wm{thHn&ndn3_CJWZTRlY2x7mC+j)T#SzDUvsgP4S)T4 zJSBYqZvC9U>1hM~pJKglP(=$i@Hf%mOt<@;;M!OI_8jp&)g|l|6kS8)$+hDt0B_h2 z%7{x#2c|ecEt!!+5}maU;XllsZmvZ>?+1v9$|r00ET2WabC@)oqs`~x)P}y{jB~x0 z@=TFIu>dC4)KR-0r2cD>7&uF{!^WztAO?h?wD6oi*c3wyw*J zvIj6`7BCZLg@twn2g*3SI38_xE8PS;sHtXBM>xw#H9rUqZ`VoOSw&Nljz(o}_}fRq zX9uECxo0a4lKArK_5wfEcCZ@j2e-l^bne?iv9*EYe2jcbFE6Ju%4+$mZC@ex;jhsT zedn*BUrQp;YZv-hHi=l&FO04M<$veCVnpVka|;;z6E{`Rfj=L1%B;yf6{?Izct^|8 zDix-1MBUmZwx>&0Q{9mCS^yg4ie!A2RUDrnUpukcRYQAo)5s1$tv7$ zO-Q|QwD3XSf)UR3i>e#*=C0r@U{Lf*9gZaO3TKs=kvewVmVk-iZ_p0zPnDD-Uu$!! 
zu%}#WVhA>wG<-~3Yfkr6@ezN4hEu$L{$oieW24x0Iyx=&y9RQG+b;90ph?n8Bk8Bc zV*kHWl-ivT_4ZTL58Rc=tJ@Jvh>@Ya`^y<82ZAv%ff`4qw0(*gfdaE_#(d>9zVUZ@ z`_fz)-=D2`mgE9m{oUHuzRW4YhWY_|Rt{UR`f>qHpHVCi69RJ7k8j@&V@5}=e#C`7 zEZ@JciLC@x#-q;1$1#CtSUw=z@}`(uHpTEn>%VG^8>|pLRpXy;cPuE80<9$dU#-Rf zAT##89eW%q9jgAd3-gh()dBlX%;Q!#dNGB1n}HerR;eitBtn|-UXlq8kQ3tx78(%Ng!Sjp!$hcEx%kgxv?Qn;@w@YIDeStSDg6bdy;$XInO^bm?)Mn{ z@(r>Z?%&95&G&`p{d+|xulw)O^>qR(Y|??06Mn0zUINUygmwVXWdsgZC$hqh^C*~mIa~}pkhYE)ZcWxy(k(?K8&##zt>9FEZUi?hx&k4 zG4G##Ddp{}Ix(keP-(SaM$WAv@Z8^OF@`I(NL_kR_V7NO9FnQ>wD~qKTiy{IK6Y_8Va`;EopW^<+*8Yc-O1Muug)DXIZzk3q>wgoqy@GM z_+iJHx!+mXNP`E8PF_x*12s}eVC4&8_KA5apjrxf7X#=*CT&&wX2Ly&Z9{8oAx$x^ zIf}319XWeIQcUQWcB57W2$_WBhpcbK`PSs&VXKf>;Zj|rN)nRyC=EOUN;!vBRRrLT;;AHvai~9?x^(!bc z4UMxJOJnSp0lWQgf%rZ^YXi6z(I7|EN~LR#S?QSJWO3pc4u$X#!l_f}p9<9YLNuNi1 zz5EyV<6B}_?&w>+!r{H|O|}&OZAXyIKx+7$ZdjjGu71+izeXr|U|DU&ry}6#PseD4 z?=M5KHw)^&zxpLNAHg%CheZ&ow**=FZ2VWY*YPvVeX#RNvn0?Zst1;g&6AO$L_A(2 zy#1}VI<6(csK$2T(w_Gv;WBro-qxXfWpnkIz`wG_LhGy+?uR5xOi9Nao0X2V`$$tr z5-yG-lqE`2P(_Vaiqb!l);D`pRfAt7W^@c|q&!b!^t@N^#0-gbPfS;V&fjgch34KC z&%>O(zUUg5&d>VPQ}#4=S*|TQw6XT>8#F6=|F`&>B?&khD6jh8#4L1WC|q?;v~z@% zMGqQS{}W^SU60d^8R8fg$J;4y@d{eL%$qDN6bor6)I52(!_TRi4o+#j^{XzdqAm7bz>T|hEuC45yBF8!2 z(Y{+#k*!@>awuqUx0_7NXWPaJ!*kEL00{8<0eT7m_+Px{(Xt?d>_?43=nO`msZw4kR|4Ew0MyqBq0%)x0IN}=4SkH*oQls8#&ge-^pAs2d=2?uY zv`kc>3qM*3RAfdZAa(C-pFKvN4oDRJ{pjOVS#T- z+%W+Mw+5IsjqfeHi_+~^1>*U@ZIeSD6kXb!YECDD$4G8L2U*4Ds+wLR-hIB^ac2E zHVZ9iP<4QsXk&XEj0xHfXaiB|;X3!w_c+uh6Xk;;Co`4ktAYWCcu*gLZEqkB$>=eu zu+(8ImqsNxc@95n*j4fdPhqS}{SL&Wf+3%3hw<1=^;2tVjQJ=i4#^bd=#8Vu#AK|^A6`LaOmUb_$9ATGuvH#qL$ZU-;qU5!QrlybzIX zA&Zp+Dii)2R`nyFI;t)hQ8}viazW=K(xW6-aIvw%@>LJK*b03!=e{`S zP*Cq3l9%+TJ}9s@;LaZObLR(YPysK$bOoXKImQ{Qh8soIAZ$8fR8`jCkB_C|VN(Ee zux~jnc@J{cbhJq@>XymRGAyJWg|BYf(TCK26{=U|3Pq0a=Z9Ip`D|I9U6+Rdw#fi< zwb!e>B((q>kpuw$liTR(Nh4QS8r_MQB?XRjrWLRCpeQsRvzOfO{&_;CAZ`1hw=j~L zUWKZ(nEVMPh^LI=DEit@H3W2#tXUr%?&BQX$=)r+HM!jPA~nTh5s$IGCrI~hi>%N# zSTwjhrLgcmg7I;%yswLE>ka-#!d*2%#vR1DCeM}N)l$J~J0{=>n|LJCWl*W-Z_Ldh zD;k?`TuGCd{3SNn)1uZ_62Y7F6_J}Q<8jsrK)Yfh^GCuiSYP?VckWzpgnTfW))*@G zQf%$~DNc9~9X7KTsS&i-a~C|BgWlllss2kWiN1LL4KC4%SG;vv))3Gk|cC$LMF`hIHM%qT?E_b?eZ zv*xE7q%d_%7EICGzlwsxN6r%&QOD;4sI{=mHNV28Zw=#DdO*w}fTw%%pg{kjMSYSw z2Yb{*&QeCRQVaX!r_3B;B;mVmRAK6ZOwV{x$kZ|q@d4tScHxQc9@sAjY*lC`5v(XL#_jZi@DV7Er3Zo`4}xWMa=_?R zH;gd1v`-YvFwMaJ_u0-QlXhO)gV3_oqWb`-!yN}VjKN4hr&7djQA+4oTtgUiWB443 zdN5Z5^!qKaZUc^zpRy+$?cyKxg1R&o2}R6$Z!N0&sLcQ zIfTKEGuJ)k6c(^W^5e;p5uaW>x10JPd#ttf+I5%RIEb!?%A}>S0|mF&ar% z8lS2E1l!0)(7O#sz+V36f552L3{}OWgR}ztxRU3hZbqOdsmu4~n2}9|ero^s6v~bA zve%qMROP-mAGFj>j$3fWN2$NvaSm}!QN5EsINHc>ILOnqH(BEgkE7-X09>i_}@8-qA^c{fO{FJ7M?qlwl6&xODsw|=*!AV_b~e9!&JDNQDL2q z@O=5ozQUiw#H%%s=XYssNem_sXghGWPp!meci4r$3)4@Trl*~sdWLR#VP|k=1e$g*%`iNv(&O=L*(ZLjZly`pmdp&RRPQn}h+1EgwaPoGxt= zMt(ZYWL#nq)(xudN8=I&u0IsTUiOOC`q(ubShAkT+yA)MJ=3YX5QyS40Fv^b9G@^e zsV2I_UJm=O4)U_m_Mn<|j7I_IwhZ7v_^5V0I){*@Fp?A`WaG<_-NCRcWzKHrbQytOngW0cAn?a7f~hT z=zSJ-G4mkX4395N=5sPf6)BI5FtPlT7mCVF^s1yrdoZW*9`QI0oWDidtZjsN`e5%uhF_byCDmU;OgRm1 zkVPqB=gisR`&X;z69^MVrZETX2eRewQoA(KQV3u|#m3G$s~H`5Dzbo>0IefxU3x6~?Sg*(7DIQUszr0{2m>q_TZg70@HG3*uB2FsR{ zbBSvl+Z*7ox4?k}7RU6}LDB_N8E_6APeRw!vd?|n-Wwy%o!i#{d8Vq}@GJ5|k~VQ@ zBMj!vgk%gyoy zUw(p0qpzqosTni*sSZJbt+BFAC zXgszsZASbzk&um-_-7#o8xPsXC;5gQYfjmnlw@iQZOWC%`8YItsE9>s%W~nhSOBPB zzQm<7BLzo>K=-%CJF-0_h}Y_h3dQhR5{C)P{*BA{$zqHzqUFhj?t@Wks=b!H5qWmz zCEt_EFiA3bImKZG_9AS0sM0*7Y_ z$pRvFYoLFT%(qK`j`9`ea!U+m#bwc^@e-|y>sQ?YY$-k_%2DbWOgRQjq}q}h#wQZH z=G>fHL(jlk{%54nnPO?9>F`$%o_P1@QVTY-Ss1o92vc&Q>T&c6PM_uo(pFT}B~#5eU*CNa?M$>t3M*3gD7d!h 
z@8nSc`#f5jrb%R>cK0OUa)Z(`-Df4U%TyOn8i@;#oKP#nWpBxc+w~rv2_+J2rQuA5O|O6xI>S08Vp?8} z+kyM`_d1O2vtliiTv=ovN66`ho&q%Wh56$Ry^k9%F`!%O;wN+2?Usv&jzY)_77tf6 zm4X#i=e@s4&@a2yv6_!xfCkr416}P|rw|omxaTN*KnA$3RBdtf%J;(j$S4dXZU^bS^`_D7~|nJ+G#ciS>r zIpnF_K2LYM_u-OWO>LibZ11`psnBNn^w^4YB%oa`&kPEloh zoW`YP1_5~Det_s92gVn2$s;;u$G#08_z@dEQF=q*4Rz58nG@{sM&46l73?3{OASh0 z$D)=9pn~{-497@n>fF(d-fMyNg=}Qc zed*x}*cY)O3OO1zzT7hsuKKSnbcYqj`G7VhPJep-J&AYim3f7Fo??wt0p4aoX}rc4 z3#1%$W4G#BciD^ZnfsjHI479U&p_w~pmBix?fC%jk`9YJiWsJfE;S$gc|KV9b|D7M zE~Dx57~yBte!Aiy^IHdzW6tW~46LDKm>u>fc@<|z0&P7^-%6iiqz<#zUSGaJ-p66@ z^w%g}Y=7s)EORr*+G(BpBou`URl8DMf6Iy}?@pgIbJiBjoysHY*CUZ11Xpf4Urv{r z@lCNFKe@J>Ozdq8jQmBjCvapty0D$mZqJtFxJIC&_D5&kZ7@z64bZuNFHO^q2nXhY z$(M>aN`X|x9`|`R^+PO1l>P}23BBmN--@oySW#5$Fwl6MdJBk^pi6bEa-nwX#;P)4 zq@-?6JS6FPViQl!w{4W}L&ZrKG2SpmlsQLf`RIK!aS9ipYN%xqqjL`AMI8OuPs(p- zt$W)=!Emzm%9j@aMh!ZU-({{s$XecI9BS_Tju>63Nxs2a_;PZND_gOwVd%XQ**Y~# z8QEC!hEG$ETJq$~7D}Bb=gY!HLB+eu&n4R35NA$5iwt$%Ya$mIpZe0DKB|X|-rmf6 zynMD4r{lF7gf#TN=SRarXen)C#Vic1hj*3nkZ-d-O_9c{(QT(%_tLfZUc0)~vThOT zYa;?XJej5gF`7^OXhoNAv-~WXMt1@s{pl$=`QBmu{OO-dn=V#+%MsOVa@Mc-*<{#* zvQBbS%|?E=5V{r_larpU37g{1I3Ve(WgkBhK?!%d9n>C=f7>UqT6&fJ4(_|k*V@aX z2lTRV2ZO4jmgnwe_R?HQTJtma0`I(O_wB3`4V2Atm7c(iE_6$q>2};U#6G7T43MhZ z!UFd)BTJa0>sUY+E(0{I{_p_d%c^~UJ_yT{R#3(0udmjTIH0AH56H!_Z@livv?P$=QhSD!*cbMe}DkY=(dO#S<#Cdsa#i{`m8 zW_6u3e|ck#ZtJHkyI;Xt1Y#n2`YjNLeig1;BC?weT(~~MPqt}RA0v`C784;nw_n?! z8>}-zfR&i%0q=|MtvW~e*_3)z=`T92>kCCr;Ta^3OF}Ht4=n~|L@laLxisIte{8Sy zSy~))S(>mURe)XJhn{Wzs-PxSFi4CsUVxrub$dY#G#I|uj_9Nc+J9v8(ZjZ+kKxbH zyss~$w-^4}A=G1im@7ybU}vD{&(h$@2Z-i*#n7+o5&z*{zU8KIW-8O81KFu4ZY+jW zuV8^@*v?XZv>W@3ac#rs?kAv622d6ZD3;Cw?ltS#;Eb*>nL=GOIDy}S;17VtLN+87 zl0Zz10>{2~Zo{G#1wpNf5M3>Scx6SIl1E7rS8#L z(ocdALT;`VVgFm~K??O8Lggvl0wcY7$T-#b=-9;*vh*)&@cy7hPM+~#mcVNr8L({m zC1DEpG_j$%(r}|w>}vzq+L!RD?wkVb6sJIp5W>cRgRbj#E~DL3nzWvj4`-~45de{H zJ%qKTxH_fDEcJT$O$f2$qB|-~l1sl^SJ9dS z@KpX(FpZqhquR~8}d@H*E&e*`jwYHlD&4MWrs_o3w!Z^%o_bdz&~{b7i>F9yPPbw*&6;5iH5|CMBJ_-;9~?Sgs`6kqoaz zi^ElmNEz_nkR0LHu^wi0m1>EEa$@D#l91W%<-^302L#+wW$e_PursN-;YW0!>1ZA> z0&@r@rLPxF72-G*hCvI3;fV{V7UStOLs4Y9L$-%{F3KOfdaS|Q|6*7%bjoAptcE{u zqT|>aKD~J++H6}wO>LliG}abJzLdYXe;g-7zvS)hmL1Ri}j1 zJ*V`$K)$bg5n^_Ze_&$}_ZH&VAE?h6vy{Oa7$Ed^Mbw1WD2K%i*- z@iQgTMET5KRfa2*ruL+kN_WrX()d$|#fqPo73metafh4z$h?m4<^ZW&KD9&*G7x~B zgQ8aIcN}5_D=|Bf%0o(P-T-%hjR=3=R6ma^y@}p_HL<1O<&dYvQYgAu2S8a58i7;{ zsQRz>@Ops3KzfW-w8x?OO>?@Nh=4>h=KMeFdpayd$t!%geIIW31X}6 z_lM21Z8n#6)Imbzk~bS1?{achcQ$a1gJ;6uPX>%Tv&dbzwRBd~zn`z0?6m5YJp2UU zL%C0xT3>OuTF<|f`?e}Y6X;(Dc0tQ`t?57jx#v4upeDePda(-(1ohHJ7VpFV9i8gM zi$z0$Fg8$_Bq%8K@f%3|W9|igiXwAfHxBKa@meXf$3Jc8l7fnLi(D!MKsqthKEN}z z0`Q`#fB?J`)Xvpla>2h!(SaE}QxM>HKK7>R-K6 zI0?y7Hjw^4i99#+svH` z%X0~EPWq~HS!RHKqOj#!75wc!O7STjgUUy~a6CQ%Re-|S zrcDa)#JG7dPumsRz~9;VM3XJ0n|0OlCcp48Hg@EtpSr}6d%ZiZ?Jssxq9%4d2H6RK zu}Q&k4#UKWQ9I_=iRviL%D%#{qtquMZf{<{T?`~#Yj;^46PCMZ%X+gdH8uk!rNz{C z%h44tWyMl7A{L-V1sw}%U$lM<7mcZE zLk=I$pb;Fnhj9*B#fD=65~+-{-s5#Id>TiLsbF}dn1iG?NX1^QmhUVc0E}8Pa5PeX3lHV0En8*Wm2ySDAvBh%Sz{Sa( z5{8cZt8}Q#r|Y1^!Y{hRMqTmwF9b;z;!+#Ar`C_r6K7>Z(zn1{cxp(lhpo3bo{%rC z=6QlUp}k-lao|Tx%oDS+;?|MR)27T|5VA1k9!~|paj%O?WCW?akl$#t$*J(>87_p$ zVh4H2O6CWz3ic-7OCSC>>aM$bE4M=j(NNwt>~4GAbG5R6xH=~~BrP94TpCRs@!^!} z&`CVF*@}gNaE06kJYtEE|7)TSfEd0fmUz3M?0!5zZ*@7rcm5xfb5{DX7YT(7{aA^4 zrg&g0Z~n7%_VK4H^X=QgpDgQwcyN&;5m&2Ugm+|b@#g5(?GKCw6du(9E=*-B_H5EN z9c53C8n%ytrkY4BV*EJ0<1j$u;2o`pr>&q}PNXp|D^o5B=ksgn^0<1NxSbpS5=}gI7njIiHQWM^ z3a9`b`s^$=mG9ZP7;8LGVA!VJVxW7E zDGB_$1L`u}8UgKY571Sd6N!zt`h2C8y=f17Gp7!&#{Fpx7u4O^orgMdn{>p 
zRr*fHNooWI310d5yIgjO)=j=PW`%vl0pjnWE$}~JS$QZ;NWv`R~3~Y*F)jA;HFFE9Z1FLe+`q zVNM&Jjx6nbcNa8&Pe9mt;u&SOm{kXxmCp*xWw{sxP%!#Svhx8&8GWO#OYEXTp&a2l z3k+E~9AvdXlvZ7i8GXkDyh|oWa4Kykx_x)XwmO`(T#EJsGgBX_!^}jb%n+I*=vf1=|EA#K7vXm-V>puEC6DIPDmg zLC0+_p6h{W09gg7um^lJy!E=5{<6BG z?ZeC}`ENgEcPpk|c?TlgZQ=QE47#!UgV;cpr9o^-h+E5&2j-k*S+|+6Paz#WFc*s) z>j0D#(zApCen=lah?))*7N#Tyx(s<-qH%4ZSD!QVMi3+!nS1lVcm2bIu`54qdXIWd zT4I{x|I>#w?*@9wPXxPO%78r%@tz*nn-Jnw~y zd0@k@X7SxBHpF36S;w*p#|?lY>_&%6VL}WpH;1=RY@N*}Z)QP_XvS^=clKZM#f{1n z;40$X46q+c*G&3Vk@YJwQYS`i&>e??X> zN&{Z!-N6Np(IKKMTbBz$bL4$~9(FGWHfI{PSuECdd;YfRDLZD!1P6Ybvm~)DQ}kUvkwHF`A(Zjlj6y zHhP+g3y!O+wB{sG|ka^7pLi3v`5vGEHxJJkq|YX0_sBd zLWeNw3xKPz5lLvSmCw+SQ>bU-eCN*n3N_ZF5FTW9pM+k|fz3o*#}5F(m@WPT zTLq;BIL1r2uf} zp%`2LON;HtpIXExQZIs`f061pfc({7r{Zx2=rVanyp|wW0%YU0F0g6IiE zh^GAptx7^up{8P)*0rJ`sVJM$kkR$GY0?LYKC^MDz|%g+{a=yjzfbxgT7G!vpmhM3 zRpV|dU3)2)>u>nEVTXWg(9Vo8JJnmEx(;7$XPQ&8%@d}gI!=o*@07dLbfIwjNviXt zxNVBlpd*o*^J?gl9@~-x37T751WjnJKH6XHYYpGgVXa{ZyxBTtf_1#0td00wT8e8- z)$;+T4P=z$c863nZftCVR0#Z!jfk&Y_r$_hBtJc);YZ(%K81 zc)hNn9;!-@E|MEWEnO!`CVB!fEaUN^tjH#X+&P2Ljj_m6oRtc9Bx))Di?^M|e(D>w z9K8Uo=hV!#JRE83bx)tT`Y}~?K>&3ytIRjOvhK+lcszgCx3YcR^Vb+nSGZ(KK#xfH zO6%xuX4VmK%iQGTW`H6&(B;Wja{?+QFX=!j(F0wA>c2;8PUwSj42eMb-_4YkKmO)J z8BW=}Lt23o_G?N+N2fc=#|HAKA_XCfT=(_Cq09Oy-)E~Uu-C>g$lcjKZjUHRF$DLDi@Nl+8TA{|i9Z!n@ zbxOE(A1SD;*hF0hJfavHIj9wy;G!^0iUh04T_(HAXA$|VkQS#CqKs%0G&Bv|bgg>%eu`;DkHQzs68S6U!%8cfH?noKv?JXWoA6(7lNCw}pvc!Bn zW(1Uz%6W)QC^)R1=ItO!FW~C{B;6DMbNq`)KgJ`=Z5q0Cz3Rpri=bhrL(F~ocnt;E1x3Y|1p71GN(+;&W6dZ*`lb3!e1OTrZr4p?*-wZG1eKq~gn1swj2 zu^IejF(Ny-no_RhK%CgrKRTuL7&w<$3^f9>Gg+l#Xun>3w zSKZ%G`S17;3xgVcZAJbRMG%d4l!LCxEqw~YM_d&= zn4NF1M@$to4~^H#$Q52c$ae>h3tRKW;)GJYQwGc}ENitsc_&l@O?Z3Nnmo)@%GzNtF5EO;5~ zt)od76H+w8_$Ti!x4xio=m6D^k!x7j1Q9fR|N2w9H59i-bvouAH^(rq7zmp94R+Gj zCjYa8&XOq{{xT?r1Im6|=Y}w4cJ7_5h(Euz(rqXVJ%TvsjfHJ{ ztrufuwrLs@q16IaDJfmYSNmy7<4YZV)91T;fs#5K=!H)0)n?O=doj#qeXj&{oadS* zF_B)ui|OehZmV#=!xh7WbLflthc3teTmvBq(r1ZdE87gWl{!1RXjyZI; zRSMg3Q0rSvycE9ujjoH@1#+b6X*xeWOoMNRpc@S%Ghcs;Byp0L|AQ)6dE8qE;(T<*TVk@p6`(1He>dNuL9 zJ(xO~9e{O97;oekn^xJq0RTEQP0bf3w85j5Sjg*ance05xU%Nkd@BPLV_gz-AR7l4 zcp^Y>*uT_jNC%=|Y*P#cBVO}Qh4C3(uOQBrd=4ql*C>LCb0t?7BtTFqnorD8Rx$3s zvdW$(^Zy69(%c4|35XMTdw!o&APBAKASQV!5+Gle*H-*&u&#(FGS=jqx?Vw<{erU@ ziv8C5ZJJ-63D(%>!!E4p{gAy;v7+o^<>`osw-y5>1QMoEe@>iA2#Bi@22Nzs<=6L6 zS2$>#BrSRFqw0wTW87AMHM(Ok&PeMGM~2LOKNAP6GPFFZFq?nq^{UPCd;E0Aux(#Q^H*pT`}pApwMa!vTYHW9 zh-;#F@r|+PxNL=pvgYYulMx_yP<55>BEuz#-OuMChgqx3|zOtDKI~w#y z#AQk>0Nsi}>D&5CG<3$XP~x}0%mO|OJMI6RqBkha4Y=#?dk#a-w^DkzD46DV5u zWFj96RhTsqM50#ZE7r9pCJtOH+kw99;|=GJFVoFjN=a?O@GaR=4;9p;Zh3)pG*e!! 
zaz@c`^1|fL{ngl~hR)=KxN$Vh-yB8SJaf2u&yB3UvkPmL0}9q1aSG2=Q(Bcdtzw0v zs|!&@YQ@x>er}73AiCW#iZ(t68$;E6|9W|-hOqC#ME+$|P=F%UeU&%rci%8d*n^p& zd{5)|!RT=3`TE$QKR9D!RRBr|tbE()NUzn)bCL!Ieo0*pNUA+~W5*$!gn{{NtYct3 zbKgj%PuAQpEdb=(EABrNyzblm0ehg$XV7ja`Muje4b`FB&x54KHz3F<6+V>J?ySIp z1F}OS^=Ds7iQ-OYOH6C?$PjL!M9mM~_4}Yh2Ckl}<}a^G@%_nXGF6TBeUl!!`}4O- ziuo8+CF)~Jg-yzS|I$P?PYltcXvjdx28yfZGTs-OrKj~U<1pk1V zkERUi`?UrkGLQG~`GR&wODO7~QE8)hk7QLcYqCS9o6L^&LqSgyIQ_9i5aY1rszeJJ z=(XJ!kFKy>ec735(IwV47NXTXckVOCU~WmwATo)Ilp)z>UP4U!om=+q1H)9_tbyMZ zlP)n`E`xg|$(~SovcVS%>Q(Xfj>u!HNkeT6)d*$2E&R^-6VAd&5}bT51?TI?s#XjQ z{W^b~^C6cxQs?5D%yIt)%4IG8fTA`f!oFYX^QH6kua`1zDd%LpPhi;H3)@l1R#mor z$OuDmKIZuZG_I(7o}m`$|Esn$jcVd*<9JzWYJ;KzMI@2B22fByHj$)tLteH50%Cxu z5Eg+V`xXe43Ze*>UA7zrL}XJWAd3kRLdu#dLTbPeWQiJ(kO&STKrgM@bN|0{=5BW9+uW;P79PyeSNaM^jEWInjBklW!;*u#7HcZ0U0c}L857Q*qNxIo;}yElcT}r2U}LI9Ee_bx;W zxkKOu@R^p(6}Je3H5t2bq>`XY{L(Aa4!4P3LXY3loEL4-#(hED&8))=g9B1?%|vPF*eMO4v)4SkqEbOC0iJ;X$< z$cU(2kk97bCZrx!fBlA6TG9oxo&Shz$qYxFfASQO-SFj-%vGw>8aZE%v;yF!e^I0 z9m`>R*acsHxn_%ah1gj*oVOVr)38QIJxLfqk32Q#X|lCRDzV<&bM}xh2XP*ifz{8h zQ{k0GxF{nq!H!Em{y?(brV#l*b$kzu~a&^xt0HYz6l zwBu#+T+IME!)_}Qg(F7CLI<}N?m2L24|2I#72}v zRQmn#z~*v;-424%x*lsXP7jhe?o$q&9!Ad~E)BK2vUBM#SmtGHyTdL3FXWGqFB8fIl?zUZn-@O;^!A`?5EmzqA` zYykEAa#e4QgsqR6xu_f6Q>P)8e00 z;V?A}pYL~b&l+>?o2_5grBse!Jd*}`&H3-r%8(X$sG2KPBg7c?P7Q|f)}rr%+2V=W zlAUN+6_q2wr#AXyc?c^*f2HIxA4XvdH~SBHGyIOdTO;=Qc)M0FtFVkvdvo&VIhOlbtu#Z zqtm(2BB8l_q6HWfJK3kA)mrj2#^6Mn>Wx(VcyMMYbqJQK34H~eXYL2uQdt*5+rr| zq&io;%;4A;1cBN0gC*0Ns&}`Vf%L2E={A3-XK`~zz^b#owch7_^hSuArW<-<$id)@ zX=*Esm|1>=U0o~14+NHZu_f|JSk}5zX*&coHT+*!(FUfe=cF4NaftpM`{VLtg%fugF{h0w^1UTyT%g0+&txS7RcUS`tN&{m!_RiC>U`}o;UtpP|u zV{5=Vh=L0V+bO`p2-*Uy3}r^S5D6lrfGhY(n1}~V<#F!QZ=#jxMz3%7xTKnOd&J43 z4cD!j?3g$I;sI)zBx>8ElHl|m8T)pomUKSA4)?$)by;-|K1cHtX2fJmImu5dK%(aHOfk1PR!c#K=y4oK)b7vW7!)PyK!&w)QoA_D zDRA&%PTN}@G09|_qC4OfY1 zUT(;}pb2^b=chM($iiSFJ^xhHh}QB(9S5(QfNUkt`uX=tp&|Cj@6?LMTD}ENd`!uJ z*G{IE|A~EpPMr>T{lF`mrUC=~NwohHo%*Qgcljff%BZ7znBi*Wz&Vcf*zoky! z3W80nMIRz7@CK`G8Ih3cd z=TY Date: Thu, 11 Apr 2024 14:10:55 +0800 Subject: [PATCH 10/98] add news in readme --- .gitignore | 1 + README.md | 1 + README_CN.md | 1 + 3 files changed, 3 insertions(+) diff --git a/.gitignore b/.gitignore index bb9ceec..03c36f2 100644 --- a/.gitignore +++ b/.gitignore @@ -136,3 +136,4 @@ dmypy.json /flask_server *.bin *ini +*.ini diff --git a/README.md b/README.md index 1db2475..94880cc 100644 --- a/README.md +++ b/README.md @@ -25,6 +25,7 @@ Codefuse-ModelCache - [Acknowledgements](#Acknowledgements) - [Contributing](#Contributing) ## news +- 🔥🔥[2024.04.09] Added the ability of Redis Search to store and retrieve embeddings in multi-tenant scene, this can reduce the interaction time between Cache and vector databases to 10ms. - 🔥🔥[2023.12.10] we integrate LLM embedding frameworks such as 'llmEmb', 'ONNX', 'PaddleNLP', 'FastText', alone with the image embedding framework 'timm', to bolster embedding functionality. - 🔥🔥[2023.11.20] codefuse-ModelCache has integrated local storage, such as sqlite and faiss, providing users with the convenience of quickly initiating tests. - [2023.08.26] codefuse-ModelCache... 
From 81478289e60cbc50b19bac9595f99431546053d2 Mon Sep 17 00:00:00 2001
From: fuhui
Date: Thu, 11 Apr 2024 15:17:00 +0800
Subject: [PATCH 11/98] update .gitignore

---
 .gitignore            |  1 +
 README.md             |  2 +-
 modelcache_serving.py | 12 ++++++++++++
 3 files changed, 14 insertions(+), 1 deletion(-)
 create mode 100644 modelcache_serving.py

diff --git a/.gitignore b/.gitignore
index 03c36f2..cffff4c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -137,3 +137,4 @@ dmypy.json
 *.bin
 *ini
 *.ini
+modelcache_serving.py
diff --git a/README.md b/README.md
index 94880cc..5f608de 100644
--- a/README.md
+++ b/README.md
@@ -25,7 +25,7 @@ Codefuse-ModelCache
 - [Acknowledgements](#Acknowledgements)
 - [Contributing](#Contributing)
 ## news
-- 🔥🔥[2024.04.09] Added the ability of Redis Search to store and retrieve embeddings in multi-tenant scene, this can reduce the interaction time between Cache and vector databases to 10ms.
+- 🔥🔥[2024.04.09] Add Redis Search to store and retrieve embeddings in multi-tenant scene, this can reduce the interaction time between Cache and vector databases to 10ms.
 - 🔥🔥[2023.12.10] we integrate LLM embedding frameworks such as 'llmEmb', 'ONNX', 'PaddleNLP', 'FastText', alone with the image embedding framework 'timm', to bolster embedding functionality.
 - 🔥🔥[2023.11.20] codefuse-ModelCache has integrated local storage, such as sqlite and faiss, providing users with the convenience of quickly initiating tests.
 - [2023.08.26] codefuse-ModelCache...
diff --git a/modelcache_serving.py b/modelcache_serving.py
new file mode 100644
index 0000000..36f9a67
--- /dev/null
+++ b/modelcache_serving.py
@@ -0,0 +1,12 @@
+# -*- coding: utf-8 -*-
+"""
+    Alipay.com Inc.
+    Copyright (c) 2004-2023 All Rights Reserved.
+    ------------------------------------------------------
+    File Name : modelcache_serving.py
+    Author : fuhui.phe
+    Create Time : 2024/4/11 15:12
+    Description : description what the main function of this file
+    Change Activity:
+         version0 : 2024/4/11 15:12 by fuhui.phe init
+"""

From 87c7ff34d23b8608bf6df1da6491be9ed26c56cd Mon Sep 17 00:00:00 2001
From: fuhui
Date: Thu, 11 Apr 2024 15:18:56 +0800
Subject: [PATCH 12/98] update .gitignore

---
 .gitignore                          |  2 +-
 modelcache/config/milvus_config.ini |  5 -----
 modelcache/config/mysql_config.ini  |  6 ------
 modelcache_serving.py               | 12 ------------
 4 files changed, 1 insertion(+), 24 deletions(-)
 delete mode 100644 modelcache/config/milvus_config.ini
 delete mode 100644 modelcache/config/mysql_config.ini
 delete mode 100644 modelcache_serving.py

diff --git a/.gitignore b/.gitignore
index cffff4c..b97d72e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -137,4 +137,4 @@ dmypy.json
 *.bin
 *ini
 *.ini
-modelcache_serving.py
+**/modelcache_serving.py
diff --git a/modelcache/config/milvus_config.ini b/modelcache/config/milvus_config.ini
deleted file mode 100644
index f5bd532..0000000
--- a/modelcache/config/milvus_config.ini
+++ /dev/null
@@ -1,5 +0,0 @@
-[milvus]
-host = ''
-port = ''
-user = ''
-password = ''
\ No newline at end of file
diff --git a/modelcache/config/mysql_config.ini b/modelcache/config/mysql_config.ini
deleted file mode 100644
index 2c63f0e..0000000
--- a/modelcache/config/mysql_config.ini
+++ /dev/null
@@ -1,6 +0,0 @@
-[mysql]
-host = ''
-port = ''
-username = ''
-password = ''
-database = ''
diff --git a/modelcache_serving.py b/modelcache_serving.py
deleted file mode 100644
index 36f9a67..0000000
--- a/modelcache_serving.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-    Alipay.com Inc.
-    Copyright (c) 2004-2023 All Rights Reserved.
-    ------------------------------------------------------
-    File Name : modelcache_serving.py
-    Author : fuhui.phe
-    Create Time : 2024/4/11 15:12
-    Description : description what the main function of this file
-    Change Activity:
-         version0 : 2024/4/11 15:12 by fuhui.phe init
-"""

From 2cec9baad8760278dfa9ff964aca2fc9b87f9120 Mon Sep 17 00:00:00 2001
From: fuhui
Date: Thu, 11 Apr 2024 15:47:38 +0800
Subject: [PATCH 13/98] Fix remove-truncate issue

---
 flask4modelcache.py                     |  1 -
 modelcache/manager/data_manager.py      | 21 ++++++++++-----------
 modelcache/manager/vector_data/redis.py |  2 +-
 3 files changed, 11 insertions(+), 13 deletions(-)

diff --git a/flask4modelcache.py b/flask4modelcache.py
index 8a3efa2..3473654 100644
--- a/flask4modelcache.py
+++ b/flask4modelcache.py
@@ -57,7 +57,6 @@ def response_hitquery(cache_resp):
     insert_pre_embedding_func=insert_multi_splicing,
 )
 
-# cache.set_openai_key()
 global executor
 executor = ThreadPoolExecutor(max_workers=6)
 
diff --git a/modelcache/manager/data_manager.py b/modelcache/manager/data_manager.py
index a83e638..d6637ea 100644
--- a/modelcache/manager/data_manager.py
+++ b/modelcache/manager/data_manager.py
@@ -260,22 +260,21 @@ def create_index(self, model, **kwargs):
         return self.v.create(model)
 
     def truncate(self, model_name):
-        # model = kwargs.pop("model", None)
-        # drop milvus data
+        # drop vector base data
         try:
-            resp = self.v.rebuild_col(model_name)
+            vector_resp = self.v.rebuild_col(model_name)
         except Exception as e:
-            return {'status': 'failed', 'milvus': 'truncate milvus data failed, please check! e: {}'.format(e),
-                    'mysql': 'unexecuted'}
-        if resp:
-            return {'status': 'failed', 'milvus': resp, 'mysql': 'unexecuted'}
+            return {'status': 'failed', 'VectorDB': 'truncate VectorDB data failed, please check! e: {}'.format(e),
+                    'ScalarDB': 'unexecuted'}
+        if vector_resp:
+            return {'status': 'failed', 'VectorDB': vector_resp, 'ScalarDB': 'unexecuted'}
+        # drop scalar base data
         try:
             delete_count = self.s.model_deleted(model_name)
         except Exception as e:
-            # return 'truncate milvus data failed, please check!'
-            return {'status': 'failed', 'milvus': 'rebuild',
-                    'mysql': 'truncate mysql data failed, please check! e: {}'.format(e)}
-        return {'status': 'success', 'milvus': 'rebuild', 'mysql': 'delete_count: ' + str(delete_count)}
+            return {'status': 'failed', 'VectorDB': 'rebuild',
+                    'ScalarDB': 'truncate scalar data failed, please check! e: {}'.format(e)}
+        return {'status': 'success', 'VectorDB': 'rebuild', 'ScalarDB': 'delete_count: ' + str(delete_count)}
 
     def flush(self):
         self.s.flush()
diff --git a/modelcache/manager/vector_data/redis.py b/modelcache/manager/vector_data/redis.py
index e8fd3f4..8a8fb37 100644
--- a/modelcache/manager/vector_data/redis.py
+++ b/modelcache/manager/vector_data/redis.py
@@ -124,7 +124,7 @@ def rebuild_col(self, model):
             self.create_index(index_name_model, index_prefix)
         except Exception as e:
             raise ValueError(str(e))
-        return 'rebuild success'
+        # return 'rebuild success'
 
     def delete(self, ids) -> None:
         pipe = self._client.pipeline()
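The reworked truncate() above returns a status dictionary keyed by 'VectorDB' and 'ScalarDB' instead of the backend-specific 'milvus'/'mysql' keys. A hedged usage sketch, where data_manager stands in for an already-initialised data manager instance and the model name is illustrative (neither appears in the patch itself):

    # Hypothetical caller of the reworked truncate(); key names follow the patch above.
    resp = data_manager.truncate("codegpt_test_model")
    if resp["status"] == "success":
        # e.g. VectorDB -> 'rebuild', ScalarDB -> 'delete_count: 42'
        print(resp["VectorDB"], resp["ScalarDB"])
    else:
        print("truncate failed:", resp)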
From a92ac2dc3d83201b4cc374901203bb12ffdb24fc Mon Sep 17 00:00:00 2001
From: fuhui
Date: Fri, 12 Apr 2024 10:18:33 +0800
Subject: [PATCH 14/98] upload config files

---
 .gitignore                          | 4 ++--
 modelcache/config/milvus_config.ini | 5 +++++
 modelcache/config/mysql_config.ini  | 6 ++++++
 modelcache/config/redis_config.ini  | 5 +++++
 4 files changed, 18 insertions(+), 2 deletions(-)
 create mode 100644 modelcache/config/milvus_config.ini
 create mode 100644 modelcache/config/mysql_config.ini
 create mode 100644 modelcache/config/redis_config.ini

diff --git a/.gitignore b/.gitignore
index b97d72e..c293fe0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -135,6 +135,6 @@ dmypy.json
 /embedding_npy
 /flask_server
 *.bin
-*ini
-*.ini
 **/modelcache_serving.py
+#*ini
+#*.ini
diff --git a/modelcache/config/milvus_config.ini b/modelcache/config/milvus_config.ini
new file mode 100644
index 0000000..f5bd532
--- /dev/null
+++ b/modelcache/config/milvus_config.ini
@@ -0,0 +1,5 @@
+[milvus]
+host = ''
+port = ''
+user = ''
+password = ''
\ No newline at end of file
diff --git a/modelcache/config/mysql_config.ini b/modelcache/config/mysql_config.ini
new file mode 100644
index 0000000..2c63f0e
--- /dev/null
+++ b/modelcache/config/mysql_config.ini
@@ -0,0 +1,6 @@
+[mysql]
+host = ''
+port = ''
+username = ''
+password = ''
+database = ''
diff --git a/modelcache/config/redis_config.ini b/modelcache/config/redis_config.ini
new file mode 100644
index 0000000..a1cdb3d
--- /dev/null
+++ b/modelcache/config/redis_config.ini
@@ -0,0 +1,5 @@
+[redis]
+host = ''
+port = ''
+user = ''
+password = ''

From 2c4672759e5c6dafc84c3ba053c9ba3c20e2dd5a Mon Sep 17 00:00:00 2001
From: fuhui
Date: Fri, 12 Apr 2024 14:15:08 +0800
Subject: [PATCH 15/98] update git ignore file

---
 .gitignore | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.gitignore b/.gitignore
index c293fe0..5f3ccf2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -136,5 +136,5 @@ dmypy.json
 /flask_server
 *.bin
 **/modelcache_serving.py
-#*ini
-#*.ini
+*ini
+*.ini

From c41377414386940dc5b715f5db256883e7f8388b Mon Sep 17 00:00:00 2001
From: fuhui
Date: Fri, 12 Apr 2024 14:22:14 +0800
Subject: [PATCH 16/98] update git ignore file

---
 .gitignore | 2 --
 1 file changed, 2 deletions(-)

diff --git a/.gitignore b/.gitignore
index 5f3ccf2..6472f15 100644
--- a/.gitignore
+++ b/.gitignore
@@ -136,5 +136,3 @@ dmypy.json
 /flask_server
 *.bin
 **/modelcache_serving.py
-*ini
-*.ini

From de8d827b5bdbfddd38cffbb46526c1c6031a4384 Mon Sep 17 00:00:00 2001
From: fuhui
Date: Fri, 12 Apr 2024 14:27:57 +0800
Subject: [PATCH 17/98] update git ignore file

---
 .gitignore | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.gitignore b/.gitignore
index 6472f15..8c87806 100644
--- a/.gitignore
+++ b/.gitignore
@@ -135,4 +135,4 @@ dmypy.json
 /embedding_npy
 /flask_server
 *.bin
-**/modelcache_serving.py
+**/modelcache_serving.py
\ No newline at end of file

From 625eec06e9523b4dc66a03ea8e117c11572f5687 Mon Sep 17 00:00:00 2001
From: fuhui
Date: Fri, 12 Apr 2024 14:29:42 +0800
Subject: [PATCH 18/98] Update .gitignore to exclude specific config files

---
 .gitignore                          | 5 ++++-
 modelcache/config/milvus_config.ini | 5 -----
 modelcache/config/mysql_config.ini  | 6 ------
 modelcache/config/redis_config.ini  | 5 -----
 4 files changed, 4 insertions(+), 17 deletions(-)
 delete mode 100644 modelcache/config/milvus_config.ini
 delete mode 100644 modelcache/config/mysql_config.ini
 delete mode 100644 modelcache/config/redis_config.ini

diff --git a/.gitignore b/.gitignore
index 8c87806..4aed58c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -135,4 +135,7 @@ dmypy.json
 /embedding_npy
 /flask_server
 *.bin
-**/modelcache_serving.py
+**/modelcache_serving.py
+**/mysql_config.ini
+**/redis_config.ini
+**/milvus_config.ini
\ No newline at end of file
diff --git a/modelcache/config/milvus_config.ini b/modelcache/config/milvus_config.ini
deleted file mode 100644
index f5bd532..0000000
--- a/modelcache/config/milvus_config.ini
+++ /dev/null
@@ -1,5 +0,0 @@
-[milvus]
-host = ''
-port = ''
-user = ''
-password = ''
\ No newline at end of file
diff --git a/modelcache/config/mysql_config.ini b/modelcache/config/mysql_config.ini
deleted file mode 100644
index 2c63f0e..0000000
--- a/modelcache/config/mysql_config.ini
+++ /dev/null
@@ -1,6 +0,0 @@
-[mysql]
-host = ''
-port = ''
-username = ''
-password = ''
-database = ''
diff --git a/modelcache/config/redis_config.ini b/modelcache/config/redis_config.ini
deleted file mode 100644
index a1cdb3d..0000000
--- a/modelcache/config/redis_config.ini
+++ /dev/null
@@ -1,5 +0,0 @@
-[redis]
-host = ''
-port = ''
-user = ''
-password = ''

From 0be02d5d21f2a55aeb3c37f5512ccce81e03b8e6 Mon Sep 17 00:00:00 2001
From: fuhui
Date: Fri, 12 Apr 2024 14:32:44 +0800
Subject: [PATCH 19/98] upload config init file

---
 .gitignore                          | 3 ---
 modelcache/config/milvus_config.ini | 5 +++++
 modelcache/config/mysql_config.ini  | 6 ++++++
 modelcache/config/redis_config.ini  | 5 +++++
 4 files changed, 16 insertions(+), 3 deletions(-)
 create mode 100644 modelcache/config/milvus_config.ini
 create mode 100644 modelcache/config/mysql_config.ini
 create mode 100644 modelcache/config/redis_config.ini

diff --git a/.gitignore b/.gitignore
index 4aed58c..6472f15 100644
--- a/.gitignore
+++ b/.gitignore
@@ -136,6 +136,3 @@ dmypy.json
 /flask_server
 *.bin
 **/modelcache_serving.py
-**/mysql_config.ini
-**/redis_config.ini
-**/milvus_config.ini
\ No newline at end of file
diff --git a/modelcache/config/milvus_config.ini b/modelcache/config/milvus_config.ini
new file mode 100644
index 0000000..f5bd532
--- /dev/null
+++ b/modelcache/config/milvus_config.ini
@@ -0,0 +1,5 @@
+[milvus]
+host = ''
+port = ''
+user = ''
+password = ''
\ No newline at end of file
diff --git a/modelcache/config/mysql_config.ini b/modelcache/config/mysql_config.ini
new file mode 100644
index 0000000..2c63f0e
--- /dev/null
+++ b/modelcache/config/mysql_config.ini
@@ -0,0 +1,6 @@
+[mysql]
+host = ''
+port = ''
+username = ''
+password = ''
+database = ''
diff --git a/modelcache/config/redis_config.ini b/modelcache/config/redis_config.ini
new file mode 100644
index 0000000..a1cdb3d
--- /dev/null
+++ b/modelcache/config/redis_config.ini
@@ -0,0 +1,5 @@
+[redis]
+host = ''
+port = ''
+user = ''
+password = ''
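The .ini templates committed above only carry empty placeholders. A minimal sketch of loading one of them, assuming the standard-library configparser is used (the loader itself is not shown in these patches) and that real values are written inside single quotes as in the templates:

    import configparser

    cfg = configparser.ConfigParser()
    cfg.read("modelcache/config/redis_config.ini")
    # The templates quote each value, so strip the surrounding quotes when reading.
    redis_host = cfg.get("redis", "host").strip("'")
    redis_port = cfg.get("redis", "port").strip("'")
    redis_user = cfg.get("redis", "user").strip("'")
    redis_password = cfg.get("redis", "password").strip("'")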
From 5bbf707521310395e6c86786225e5524205e8787 Mon Sep 17 00:00:00 2001
From: fuhui
Date: Fri, 12 Apr 2024 14:39:49 +0800
Subject: [PATCH 20/98] update git ignore file

---
 .gitignore | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.gitignore b/.gitignore
index 6472f15..72fc777 100644
--- a/.gitignore
+++ b/.gitignore
@@ -136,3 +136,4 @@ dmypy.json
 /flask_server
 *.bin
 **/modelcache_serving.py
+*.ini
\ No newline at end of file
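The next patch checks in a multi-modal (image + text) serving entry point as a service test. Its __call__ method expects a JSON string; a hypothetical request body for the iat_query path, with the top-level field names taken from the parsing logic below and the query payload shape left as an assumption:

    import json

    param = json.dumps({
        "request_type": "iat_query",
        "scope": {"model": "multimodal-llm-test"},      # model name is illustrative
        "UUID": "trace-id==>{}".format(1712899200.0),   # optional; parsed via UUID.split('==>')
        "query": {"image": "https://example.com/demo.jpg",
                  "text": "what is in this picture?"},  # payload shape is an assumption
    })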
get_table_suffix() + image_dimension = 768 + text_dimension = 768 + # data_manager = get_data_manager(CacheBase("oceanbase", table_suffix=self.table_suffix), + # VectorBase("milvus", iat_dimension=image_dimension + text_dimension, + # i_dimension=image_dimension, t_dimension=text_dimension, + # table_suffix=self.table_suffix)) + + data_manager = get_data_manager(CacheBase("oceanbase", table_suffix=self.table_suffix), + VectorBase("redis", iat_dimension=image_dimension+text_dimension, + i_dimension=image_dimension, t_dimension=text_dimension, + table_suffix=self.table_suffix)) + cache.init( + # embedding_func=get_cache_embedding_text2vec, + # image_embedding_func=timm2vec.to_embeddings, + # text_embedding_func=text2vec.to_embeddings, + embedding_func=get_embedding_multi, + embedding_concur_func=get_embedding_multi_concurrent_sin, + data_manager=data_manager, + similarity_evaluation=SearchDistanceEvaluation(), + # iat_query_pre_embedding_func=query_multi_splicing, + iat_insert_pre_embedding_func=insert_iat_dict, + iat_query_pre_embedding_func=query_iat_dict, + # insert_pre_embedding_miulti_func=insert_multimodal_splicing, + # query_pre_embedding_miulti_func=query_multimodal_splicing, + ) + # cache.set_openai_key() + self.gptcache_version = datetime.now().strftime("%Y-%m-%d %H:%M") + self.executor = ThreadPoolExecutor(max_workers=6) + + def __call__(self, param): + print('gptcache_version: {}'.format(self.gptcache_version)) + # logging.info('gptcache_version: {}'.format(self.gptcache_version)) + print('call_time: {}'.format(datetime.now().strftime("%Y-%m-%d %H:%M:%S"))) + # logging.info('call_time: {}'.format(datetime.now().strftime("%Y-%m-%d %H:%M"))) + try: + # print('param: {}'.format(param)) + param_dict = json.loads(param) + except Exception as e: + result = {"errorCode": 101, "errorDesc": str(e), "cacheHit": False, "delta_time": 0, "hit_query": '', + "answer": ''} + cache.data_manager.save_query_resp(result, model='', query='', delta_time=0) + return json.dumps(result) + + request_type = param_dict.get("request_type") + UUID = param_dict.get("UUID", None) + print('request_type: {}'.format(request_type)) + # param parsing + try: + scope = param_dict.get("scope") + print('scope: {}'.format(scope)) + if scope is not None: + model = scope.get('model') + model = model.replace('-', '_') + model = model.replace('.', '_') + print('model: {}'.format(model)) + + if request_type in ['iat_query', 'iat_insert']: + if request_type == 'iat_query': + query = param_dict.get("query") + elif request_type == 'iat_insert': + chat_info = param_dict.get("chat_info") + query = chat_info[-1]['query'] + + if request_type is None or request_type not in ['iat_query', 'iat_remove', 'iat_insert', 'iat_register']: + result = {"errorCode": 102, + "errorDesc": "type exception, should one of ['iat_query', 'iat_insert', 'iat_remove', 'iat_register']", + "cacheHit": False, "delta_time": 0, "hit_query": '', "answer": ''} + cache.data_manager.save_query_resp(result, model=model, query='', delta_time=0) + return json.dumps(result) + except Exception as e: + result = {"errorCode": 103, "errorDesc": str(e), "cacheHit": False, "delta_time": 0, "hit_query": '', + "answer": ''} + # cache.data_manager.save_query_resp(result, model='', query='', delta_time=0) + return json.dumps(result) + + # --------分割线 + if request_type == 'iat_query': + if UUID: + try: + uuid_list = UUID.split('==>') + user_start = float(uuid_list[1]) + ray_http_cost = time.time()-user_start + print('ray_http_cost: {}'.format(ray_http_cost)) + except Exception 
as e: + print('uuid_e: {}'.format(e)) + try: + start_time = time.time() + response = codegpt.ChatCompletion.create_iat_query( + scope={"model": model}, + query=query, + ) + # print('response: {}'.format(response)) + delta_time = '{}s'.format(round(time.time() - start_time, 2)) + if response is None: + result = {"errorCode": 0, "errorDesc": '', "cacheHit": False, "delta_time": delta_time, + "hit_query": '', "answer": ''} + elif isinstance(response, dict): + answer = response_text(response) + hit_query = response_hitquery(response) + result = {"errorCode": 0, "errorDesc": '', "cacheHit": True, "delta_time": delta_time, + "hit_query": hit_query, "answer": answer} + else: + result = {"errorCode": 201, "errorDesc": response, "cacheHit": False, "delta_time": delta_time, + "hit_query": '', "answer": ''} + delta_time_log = round(time.time() - start_time, 3) + print('delta_time_log: {}'.format(delta_time_log)) + + # modify at 20230807 20:51 + future = self.executor.submit(save_query_info, result, model, query, delta_time_log) + query_time = round(time.time() - start_time, 2) + print('query_time: {}'.format(query_time)) + except Exception as e: + result = {"errorCode": 202, "errorDesc": str(e), "cacheHit": False, "delta_time": 0, + "hit_query": '', "answer": ''} + print('result: {}'.format(result)) + return json.dumps(result, ensure_ascii=False) + + # response = codegpt.ChatCompletion.create_iat_query( + # scope={"model": model}, + # query=query, + # ) + # print('response_query: {}'.format(response)) + + if request_type == 'iat_insert': + if UUID: + try: + uuid_list = UUID.split('==>') + user_start = float(uuid_list[1]) + ray_http_cost = time.time()-user_start + print('ray_http_cost: {}'.format(ray_http_cost)) + except Exception as e: + print('uuid_e: {}'.format(e)) + try: + start_time = time.time() + try: + response = codegpt.ChatCompletion.create_iat_insert( + model=model, + chat_info=chat_info, + ) + except Exception as e: + result = {"errorCode": 303, "errorDesc": e, "writeStatus": "exception"} + return json.dumps(result, ensure_ascii=False) + + if response == 'success': + result = {"errorCode": 0, "errorDesc": "", "writeStatus": "success"} + else: + result = {"errorCode": 301, "errorDesc": response, "writeStatus": "exception"} + insert_time = round(time.time() - start_time, 2) + print('insert_time: {}'.format(insert_time)) + return json.dumps(result, ensure_ascii=False) + except Exception as e: + result = {"errorCode": 304, "errorDesc": e, "writeStatus": "exception"} + print('result: {}'.format(result)) + return json.dumps(result, ensure_ascii=False) + + # response = codegpt.ChatCompletion.create_iat_insert( + # model=model, + # chat_info=chat_info, + # milvus_collection_ins=collection_ins + # ) + # print('response: {}'.format(response)) + + if request_type == 'iat_remove': + remove_type = param_dict.get("remove_type") + id_list = param_dict.get("id_list", []) + print('remove_type: {}'.format(remove_type)) + + response = codegpt.ChatCompletion.create_iat_remove( + model=model, + remove_type=remove_type, + id_list=id_list + ) + + if not isinstance(response, dict): + result = {"errorCode": 401, "errorDesc": "", "response": response, "removeStatus": "exception"} + return json.dumps(result) + + state = response.get('status') + # if response == 'success': + if state == 'success': + result = {"errorCode": 0, "errorDesc": "", "response": response, "writeStatus": "success"} + else: + result = {"errorCode": 402, "errorDesc": "", "response": response, "writeStatus": "exception"} + return 
json.dumps(result) + + if request_type == 'iat_register': + iat_type = param_dict.get("iat_type") + response = codegpt.ChatCompletion.create_iat_register( + model=model, + iat_type=iat_type, + table_suffix=self.table_suffix + ) + if response in ['create_success', 'already_exists']: + result = {"errorCode": 0, "errorDesc": "", "response": response, "writeStatus": "success"} + else: + result = {"errorCode": 502, "errorDesc": "", "response": response, "writeStatus": "exception"} + return json.dumps(result) + + def __update_config__(self, config: Dict[str, object]): + """ + 可选 + """ + pass + + def __health_check__(self): + """ + 可选 + """ + # logging.info(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))) + return True + + +if __name__ == '__main__': + # ============01 + # request_type = 'iat_insert' + # scope = {"model": "test_0313"} + # # UUID = "820b0052-d9d8-11ee-95f1-52775e3e6fd1" + "==>" + str(time.time()) + # UUID = str(uuid.uuid1()) + "==>" + str(time.time()) + # print('UUID: {}'.format(UUID)) + # img_data = "http://resarch.oss-cn-hangzhou-zmf.aliyuncs.com/transFile%2Ftmp%2FLMM_test_image_coco%2FCOCO_train2014_000000332345.jpg" + # query = {'text': ['父母带着孩子来这个地方可能会有什么顾虑'], + # 'imageRaw': '', + # 'imageUrl': img_data, + # 'imageId': 'ccc'} + # answer = "应该注意小孩不要跑到铁轨上" + # chat_info = [{"query": query, "answer": answer}] + # data_dict = {'request_type': request_type, 'scope': scope, 'chat_info': chat_info, 'UUID': UUID} + # r1 = json.dumps(data_dict) + + # ============02 + request_type = 'iat_query' + UUID = str(uuid.uuid1()) + "==>" + str(time.time()) + scope = {"model": "test_0313"} + img_data = 'http://resarch.oss-cn-hangzhou-zmf.aliyuncs.com/transFile%2Ftmp%2FLMM_test_image_coco%2FCOCO_train2014_000000332345.jpg' + query = {'text': ['父母带着孩子来这个地方可能会有什么顾虑'], + 'imageRaw': '', + 'imageUrl': img_data, + 'multiType': 'IMG_TEXT'} + r1 = json.dumps({'request_type': request_type, 'scope': scope, 'query': query, 'UUID': UUID}) + + # ============03 + # request_type = 'iat_remove' + # scope = {"model": "test_0313"} + # # iat_type = 'IMG_TEXT' + # remove_type = 'truncate_by_model' + # r1 = json.dumps({'request_type': request_type, 'scope': scope, 'remove_type': remove_type}) + + # ============04 + # request_type = 'iat_register' + # scope = {"model": "test_0313"} + # iat_type = 'IMG_TEXT' + # r1 = json.dumps({'request_type': request_type, 'scope': scope, 'iat_type': iat_type}) + + user_backend = UserBackend() + resp = user_backend(r1) + print('resp: {}'.format(resp)) From 10c8ae0ed75e52bf47f3ec5128fc80445100ef30 Mon Sep 17 00:00:00 2001 From: fuhui Date: Tue, 16 Apr 2024 10:21:10 +0800 Subject: [PATCH 22/98] multimodel cache --- .gitignore | 4 +- modelcache/adapter_mm/__init__.py | 1 + modelcache/adapter_mm/adapter.py | 63 ++++ modelcache/adapter_mm/adapter_insert.py | 40 +++ modelcache/adapter_mm/adapter_query.py | 148 +++++++++ modelcache/adapter_mm/adapter_register.py | 13 + modelcache/adapter_mm/adapter_remove.py | 26 ++ modelcache/core.py | 15 +- modelcache/manager/vector_data/redis.py | 14 +- modelcache/manager_mm/__init__.py | 5 + modelcache/manager_mm/data_manager.py | 291 ++++++++++++++++++ modelcache/manager_mm/data_manager_mm.py | 291 ++++++++++++++++++ modelcache/manager_mm/eviction/__init__.py | 10 + modelcache/manager_mm/eviction/base.py | 22 ++ modelcache/manager_mm/eviction/manager.py | 25 ++ .../manager_mm/eviction/memory_cache.py | 44 +++ modelcache/manager_mm/eviction_manager.py | 47 +++ modelcache/manager_mm/factory.py | 50 +++ 
modelcache/manager_mm/object_data/__init__.py | 9 + modelcache/manager_mm/object_data/base.py | 25 ++ modelcache/manager_mm/scalar_data/__init__.py | 9 + modelcache/manager_mm/scalar_data/base.py | 134 ++++++++ modelcache/manager_mm/scalar_data/manager.py | 32 ++ .../manager_mm/scalar_data/sql_storage.py | 174 +++++++++++ .../scalar_data/sql_storage_sqlite.py | 284 +++++++++++++++++ modelcache/manager_mm/vector_data/__init__.py | 10 + modelcache/manager_mm/vector_data/base.py | 41 +++ modelcache/manager_mm/vector_data/faiss.py | 53 ++++ modelcache/manager_mm/vector_data/manager.py | 135 ++++++++ modelcache/manager_mm/vector_data/milvus.py | 189 ++++++++++++ modelcache/manager_mm/vector_data/redis.py | 141 +++++++++ modelcache/processor/pre.py | 11 + multicache_serving.py | 115 +++---- 33 files changed, 2390 insertions(+), 81 deletions(-) create mode 100644 modelcache/adapter_mm/__init__.py create mode 100644 modelcache/adapter_mm/adapter.py create mode 100644 modelcache/adapter_mm/adapter_insert.py create mode 100644 modelcache/adapter_mm/adapter_query.py create mode 100644 modelcache/adapter_mm/adapter_register.py create mode 100644 modelcache/adapter_mm/adapter_remove.py create mode 100644 modelcache/manager_mm/__init__.py create mode 100644 modelcache/manager_mm/data_manager.py create mode 100644 modelcache/manager_mm/data_manager_mm.py create mode 100644 modelcache/manager_mm/eviction/__init__.py create mode 100644 modelcache/manager_mm/eviction/base.py create mode 100644 modelcache/manager_mm/eviction/manager.py create mode 100644 modelcache/manager_mm/eviction/memory_cache.py create mode 100644 modelcache/manager_mm/eviction_manager.py create mode 100644 modelcache/manager_mm/factory.py create mode 100644 modelcache/manager_mm/object_data/__init__.py create mode 100644 modelcache/manager_mm/object_data/base.py create mode 100644 modelcache/manager_mm/scalar_data/__init__.py create mode 100644 modelcache/manager_mm/scalar_data/base.py create mode 100644 modelcache/manager_mm/scalar_data/manager.py create mode 100644 modelcache/manager_mm/scalar_data/sql_storage.py create mode 100644 modelcache/manager_mm/scalar_data/sql_storage_sqlite.py create mode 100644 modelcache/manager_mm/vector_data/__init__.py create mode 100644 modelcache/manager_mm/vector_data/base.py create mode 100644 modelcache/manager_mm/vector_data/faiss.py create mode 100644 modelcache/manager_mm/vector_data/manager.py create mode 100644 modelcache/manager_mm/vector_data/milvus.py create mode 100644 modelcache/manager_mm/vector_data/redis.py diff --git a/.gitignore b/.gitignore index 72fc777..929c3ce 100644 --- a/.gitignore +++ b/.gitignore @@ -136,4 +136,6 @@ dmypy.json /flask_server *.bin **/modelcache_serving.py -*.ini \ No newline at end of file +*.ini + +**/maya_embedding_service \ No newline at end of file diff --git a/modelcache/adapter_mm/__init__.py b/modelcache/adapter_mm/__init__.py new file mode 100644 index 0000000..40a96af --- /dev/null +++ b/modelcache/adapter_mm/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/modelcache/adapter_mm/adapter.py b/modelcache/adapter_mm/adapter.py new file mode 100644 index 0000000..abde6fb --- /dev/null +++ b/modelcache/adapter_mm/adapter.py @@ -0,0 +1,63 @@ +# -*- coding: utf-8 -*- +import logging + +from modelcache.adapter_mm.adapter_query import adapt_query +from modelcache.adapter_mm.adapter_insert import adapt_insert +from modelcache.adapter.adapter_remove import adapt_remove +from modelcache.adapter.adapter_register import adapt_register + + +class 
ChatCompletion(object): + """Openai ChatCompletion Wrapper""" + @classmethod + def create_mm_query(cls, *args, **kwargs): + def cache_data_convert(cache_data, cache_query): + return construct_resp_from_cache(cache_data, cache_query) + try: + return adapt_query( + cache_data_convert, + *args, + **kwargs + ) + except Exception as e: + return str(e) + + @classmethod + def create_mm_insert(cls, *args, **kwargs): + try: + return adapt_insert( + *args, + **kwargs + ) + except Exception as e: + return str(e) + + @classmethod + def create_mm_remove(cls, *args, **kwargs): + try: + return adapt_remove( + *args, + **kwargs + ) + except Exception as e: + logging.info('adapt_remove_e: {}'.format(e)) + return str(e) + + @classmethod + def create_mm_register(cls, *args, **kwargs): + try: + return adapt_register( + *args, + **kwargs + ) + except Exception as e: + return str(e) + + +def construct_resp_from_cache(return_message, return_query): + return { + "modelcache": True, + "hitQuery": return_query, + "data": return_message, + "errorCode": 0 + } diff --git a/modelcache/adapter_mm/adapter_insert.py b/modelcache/adapter_mm/adapter_insert.py new file mode 100644 index 0000000..74aa619 --- /dev/null +++ b/modelcache/adapter_mm/adapter_insert.py @@ -0,0 +1,40 @@ +# -*- coding: utf-8 -*- +from modelcache import cache +from modelcache.utils.error import NotInitError +from modelcache.utils.time import time_cal + + +def adapt_insert(*args, **kwargs): + chat_cache = kwargs.pop("cache_obj", cache) + model = kwargs.pop("model", None) + require_object_store = kwargs.pop("require_object_store", False) + if require_object_store: + assert chat_cache.data_manager.o, "Object store is required for adapter." + if not chat_cache.has_init: + raise NotInitError() + cache_enable = chat_cache.cache_enable_func(*args, **kwargs) + context = kwargs.pop("cache_context", {}) + embedding_data = None + pre_embedding_data = chat_cache.insert_pre_embedding_func( + kwargs, + extra_param=context.get("pre_embedding_func", None), + prompts=chat_cache.config.prompts, + ) + chat_info = kwargs.pop("chat_info", []) + llm_data = chat_info[-1]['answer'] + + if cache_enable: + embedding_data = time_cal( + chat_cache.embedding_func, + func_name="embedding", + report_func=chat_cache.report.embedding, + )(pre_embedding_data) + + chat_cache.data_manager.save( + pre_embedding_data, + llm_data, + embedding_data, + model=model, + extra_param=context.get("save_func", None) + ) + return 'success' diff --git a/modelcache/adapter_mm/adapter_query.py b/modelcache/adapter_mm/adapter_query.py new file mode 100644 index 0000000..934c644 --- /dev/null +++ b/modelcache/adapter_mm/adapter_query.py @@ -0,0 +1,148 @@ +# -*- coding: utf-8 -*- +import logging +import time +from modelcache import cache +from modelcache.utils.error import NotInitError +from modelcache.utils.time import time_cal +from modelcache.processor.pre import multi_analysis + + +def adapt_query(cache_data_convert, *args, **kwargs): + chat_cache = kwargs.pop("cache_obj", cache) + scope = kwargs.pop("scope", None) + model = scope['model'] + if not chat_cache.has_init: + raise NotInitError() + cache_enable = chat_cache.cache_enable_func(*args, **kwargs) + context = kwargs.pop("cache_context", {}) + embedding_data = None + cache_factor = kwargs.pop("cache_factor", 1.0) + pre_embedding_data = chat_cache.query_pre_embedding_func( + kwargs, + extra_param=context.get("pre_embedding_func", None), + prompts=chat_cache.config.prompts, + ) + + if cache_enable: + embedding_data = time_cal( + 
chat_cache.embedding_func, + func_name="embedding", + report_func=chat_cache.report.embedding, + )(pre_embedding_data) + + if cache_enable: + cache_data_list = time_cal( + chat_cache.data_manager.search, + func_name="milvus_search", + report_func=chat_cache.report.search, + )( + embedding_data, + extra_param=context.get("search_func", None), + top_k=kwargs.pop("top_k", -1), + model=model + ) + cache_answers = [] + cache_questions = [] + cache_ids = [] + similarity_threshold = chat_cache.config.similarity_threshold + similarity_threshold_long = chat_cache.config.similarity_threshold_long + + min_rank, max_rank = chat_cache.similarity_evaluation.range() + rank_threshold = (max_rank - min_rank) * similarity_threshold * cache_factor + rank_threshold_long = (max_rank - min_rank) * similarity_threshold_long * cache_factor + rank_threshold = ( + max_rank + if rank_threshold > max_rank + else min_rank + if rank_threshold < min_rank + else rank_threshold + ) + rank_threshold_long = ( + max_rank + if rank_threshold_long > max_rank + else min_rank + if rank_threshold_long < min_rank + else rank_threshold_long + ) + + if cache_data_list is None or len(cache_data_list) == 0: + rank_pre = -1.0 + else: + cache_data_dict = {'search_result': cache_data_list[0]} + rank_pre = chat_cache.similarity_evaluation.evaluation( + None, + cache_data_dict, + extra_param=context.get("evaluation_func", None), + ) + if rank_pre < rank_threshold: + return + + for cache_data in cache_data_list: + primary_id = cache_data[1] + start_time = time.time() + ret = chat_cache.data_manager.get_scalar_data( + cache_data, extra_param=context.get("get_scalar_data", None) + ) + if ret is None: + continue + + if "deps" in context and hasattr(ret.question, "deps"): + eval_query_data = { + "question": context["deps"][0]["data"], + "embedding": None + } + eval_cache_data = { + "question": ret.question.deps[0].data, + "answer": ret.answers[0].answer, + "search_result": cache_data, + "embedding": None, + } + else: + eval_query_data = { + "question": pre_embedding_data, + "embedding": embedding_data, + } + + eval_cache_data = { + "question": ret[0], + "answer": ret[1], + "search_result": cache_data, + "embedding": None + } + rank = chat_cache.similarity_evaluation.evaluation( + eval_query_data, + eval_cache_data, + extra_param=context.get("evaluation_func", None), + ) + + if len(pre_embedding_data) <= 256: + if rank_threshold <= rank: + cache_answers.append((rank, ret[1])) + cache_questions.append((rank, ret[0])) + cache_ids.append((rank, primary_id)) + else: + if rank_threshold_long <= rank: + cache_answers.append((rank, ret[1])) + cache_questions.append((rank, ret[0])) + cache_ids.append((rank, primary_id)) + cache_answers = sorted(cache_answers, key=lambda x: x[0], reverse=True) + cache_questions = sorted(cache_questions, key=lambda x: x[0], reverse=True) + cache_ids = sorted(cache_ids, key=lambda x: x[0], reverse=True) + if len(cache_answers) != 0: + return_message = chat_cache.post_process_messages_func( + [t[1] for t in cache_answers] + ) + return_query = chat_cache.post_process_messages_func( + [t[1] for t in cache_questions] + ) + return_id = chat_cache.post_process_messages_func( + [t[1] for t in cache_ids] + ) + # 更新命中次数 + try: + chat_cache.data_manager.update_hit_count(return_id) + except Exception: + logging.info('update_hit_count except, please check!') + + chat_cache.report.hint_cache() + return cache_data_convert(return_message, return_query) diff --git a/modelcache/adapter_mm/adapter_register.py 
b/modelcache/adapter_mm/adapter_register.py new file mode 100644 index 0000000..53df128 --- /dev/null +++ b/modelcache/adapter_mm/adapter_register.py @@ -0,0 +1,13 @@ +# -*- coding: utf-8 -*- +from modelcache import cache + + +def adapt_register(*args, **kwargs): + chat_cache = kwargs.pop("cache_obj", cache) + model = kwargs.pop("model", None) + if model is None or len(model) == 0: + return ValueError('') + + register_resp = chat_cache.data_manager.create_index(model) + print('register_resp: {}'.format(register_resp)) + return register_resp diff --git a/modelcache/adapter_mm/adapter_remove.py b/modelcache/adapter_mm/adapter_remove.py new file mode 100644 index 0000000..25f1ba3 --- /dev/null +++ b/modelcache/adapter_mm/adapter_remove.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +from modelcache import cache +from modelcache.utils.error import NotInitError, RemoveError + + +def adapt_remove(*args, **kwargs): + chat_cache = kwargs.pop("cache_obj", cache) + model = kwargs.pop("model", None) + remove_type = kwargs.pop("remove_type", None) + require_object_store = kwargs.pop("require_object_store", False) + if require_object_store: + assert chat_cache.data_manager.o, "Object store is required for adapter." + if not chat_cache.has_init: + raise NotInitError() + + # delete data + if remove_type == 'delete_by_id': + id_list = kwargs.pop("id_list", []) + resp = chat_cache.data_manager.delete(id_list, model=model) + elif remove_type == 'truncate_by_model': + resp = chat_cache.data_manager.truncate(model) + else: + # resp = "remove_type_error" + raise RemoveError() + return resp + diff --git a/modelcache/core.py b/modelcache/core.py index 331fe6b..2f633f5 100644 --- a/modelcache/core.py +++ b/modelcache/core.py @@ -17,9 +17,10 @@ class Cache: def __init__(self): self.has_init = False self.cache_enable_func = None - self.query_pre_embedding_func = None - self.insert_pre_embedding_func = None + self.mm_query_pre_embedding_func = None + self.mm_insert_pre_embedding_func = None self.embedding_func = None + self.embedding_concurrent_func = None self.data_manager: Optional[DataManager] = None self.similarity_evaluation: Optional[SimilarityEvaluation] = None self.post_process_messages_func = None @@ -30,9 +31,10 @@ def __init__(self): def init( self, cache_enable_func=cache_all, - query_pre_embedding_func=None, - insert_pre_embedding_func=None, + mm_query_pre_embedding_func=None, + mm_insert_pre_embedding_func=None, embedding_func=string_embedding, + embedding_concurrent_func=string_embedding, data_manager: DataManager = get_data_manager(), similarity_evaluation=ExactMatchEvaluation(), post_process_messages_func=first, @@ -41,9 +43,10 @@ def init( ): self.has_init = True self.cache_enable_func = cache_enable_func - self.query_pre_embedding_func = query_pre_embedding_func - self.insert_pre_embedding_func = insert_pre_embedding_func + self.mm_query_pre_embedding_func = mm_query_pre_embedding_func + self.mm_insert_pre_embedding_func = mm_insert_pre_embedding_func self.embedding_func = embedding_func + self.embedding_concurrent_func = embedding_concurrent_func self.data_manager: DataManager = data_manager self.similarity_evaluation = similarity_evaluation self.post_process_messages_func = post_process_messages_func diff --git a/modelcache/manager/vector_data/redis.py b/modelcache/manager/vector_data/redis.py index 8a8fb37..a3f7faf 100644 --- a/modelcache/manager/vector_data/redis.py +++ b/modelcache/manager/vector_data/redis.py @@ -21,19 +21,25 @@ def __init__( port: str = "6379", username: str = "", password: 
str = "", - dimension: int = 0, + # dimension: int = 0, + mm_dimension: int = 0, + i_dimension: int = 0, + t_dimension: int = 0, top_k: int = 1, namespace: str = "", ): - if dimension <= 0: + if mm_dimension <= 0: raise ValueError( - f"invalid `dim` param: {dimension} in the Milvus vector store." + f"invalid `dim` param: {mm_dimension} in the Redis vector store." ) self._client = Redis( host=host, port=int(port), username=username, password=password ) self.top_k = top_k - self.dimension = dimension + # self.dimension = dimension + self.mm_dimension = mm_dimension + self.i_dimension = i_dimension + self.t_dimension = t_dimension self.namespace = namespace self.doc_prefix = f"{self.namespace}doc:" diff --git a/modelcache/manager_mm/__init__.py b/modelcache/manager_mm/__init__.py new file mode 100644 index 0000000..4bfc597 --- /dev/null +++ b/modelcache/manager_mm/__init__.py @@ -0,0 +1,5 @@ +# -*- coding: utf-8 -*- +from modelcache.manager_mm.scalar_data import CacheBase +from modelcache.manager_mm.vector_data import VectorBase +from modelcache.manager_mm.object_data import ObjectBase +from modelcache.manager_mm.factory import get_data_manager diff --git a/modelcache/manager_mm/data_manager.py b/modelcache/manager_mm/data_manager.py new file mode 100644 index 0000000..d6637ea --- /dev/null +++ b/modelcache/manager_mm/data_manager.py @@ -0,0 +1,291 @@ +# -*- coding: utf-8 -*- +import logging +import time +import requests +import pickle +import numpy as np +import cachetools +from abc import abstractmethod, ABCMeta +from typing import List, Any, Optional, Union +from modelcache.manager.scalar_data.base import ( + CacheStorage, + CacheData, + DataType, + Answer, + Question +) +from modelcache.utils.error import CacheError, ParamError +from modelcache.manager.vector_data.base import VectorBase, VectorData +from modelcache.manager.object_data.base import ObjectBase +from modelcache.manager.eviction import EvictionBase +from modelcache.manager.eviction_manager import EvictionManager +from modelcache.utils.log import modelcache_log + + +class DataManager(metaclass=ABCMeta): + """DataManager manage the cache data, including save and search""" + + @abstractmethod + def save(self, question, answer, embedding_data, **kwargs): + pass + + @abstractmethod + def save_query_resp(self, query_resp_dict, **kwargs): + pass + + @abstractmethod + def import_data( + self, questions: List[Any], answers: List[Any], embedding_datas: List[Any], model:Any + ): + pass + + @abstractmethod + def get_scalar_data(self, res_data, **kwargs) -> CacheData: + pass + + @abstractmethod + def update_hit_count(self, primary_id, **kwargs): + pass + + def hit_cache_callback(self, res_data, **kwargs): + pass + + @abstractmethod + def search(self, embedding_data, **kwargs): + pass + + @abstractmethod + def delete(self, id_list, **kwargs): + pass + + def truncate(self, model_name): + pass + + def flush(self): + pass + + @abstractmethod + def close(self): + pass + + +class MapDataManager(DataManager): + def __init__(self, data_path, max_size, get_data_container=None): + if get_data_container is None: + self.data = cachetools.LRUCache(max_size) + else: + self.data = get_data_container(max_size) + self.data_path = data_path + self.init() + + def init(self): + try: + with open(self.data_path, "rb") as f: + self.data = pickle.load(f) + except FileNotFoundError: + return + except PermissionError: + raise CacheError( # pylint: disable=W0707 + f"You don't have permission to access this file <{self.data_path}>." 
+ ) + + def save(self, question, answer, embedding_data, **kwargs): + if isinstance(question, Question): + question = question.content + self.data[embedding_data] = (question, answer, embedding_data) + + def save_query_resp(self, query_resp_dict, **kwargs): + pass + + def import_data( + self, questions: List[Any], answers: List[Any], embedding_datas: List[Any], model: Any + ): + if len(questions) != len(answers) or len(questions) != len(embedding_datas): + raise ParamError("Make sure that all parameters have the same length") + for i, embedding_data in enumerate(embedding_datas): + self.data[embedding_data] = (questions[i], answers[i], embedding_datas[i]) + + def get_scalar_data(self, res_data, **kwargs) -> CacheData: + return CacheData(question=res_data[0], answers=res_data[1]) + + def update_hit_count(self, primary_id, **kwargs): + pass + + def search(self, embedding_data, **kwargs): + try: + return [self.data[embedding_data]] + except KeyError: + return [] + + def delete(self, id_list, **kwargs): + pass + + def truncate(self, model_name): + pass + + def flush(self): + try: + with open(self.data_path, "wb") as f: + pickle.dump(self.data, f) + except PermissionError: + modelcache_log.error( + "You don't have permission to access this file %s.", self.data_path + ) + + def close(self): + self.flush() + + +def normalize(vec): + magnitude = np.linalg.norm(vec) + normalized_v = vec / magnitude + return normalized_v + + +class SSDataManager(DataManager): + def __init__( + self, + s: CacheStorage, + v: VectorBase, + o: Optional[ObjectBase], + max_size, + clean_size, + policy="LRU", + ): + self.max_size = max_size + self.clean_size = clean_size + self.s = s + self.v = v + self.o = o + + def save(self, question, answer, embedding_data, **kwargs): + model = kwargs.pop("model", None) + self.import_data([question], [answer], [embedding_data], model) + + def save_query_resp(self, query_resp_dict, **kwargs): + save_query_start_time = time.time() + self.s.insert_query_resp(query_resp_dict, **kwargs) + save_query_delta_time = '{}s'.format(round(time.time() - save_query_start_time, 2)) + + def _process_answer_data(self, answers: Union[Answer, List[Answer]]): + if isinstance(answers, Answer): + answers = [answers] + new_ans = [] + for ans in answers: + if ans.answer_type != DataType.STR: + new_ans.append(Answer(self.o.put(ans.answer), ans.answer_type)) + else: + new_ans.append(ans) + return new_ans + + def _process_question_data(self, question: Union[str, Question]): + if isinstance(question, Question): + if question.deps is None: + return question + + for dep in question.deps: + if dep.dep_type == DataType.IMAGE_URL: + dep.dep_type.data = self.o.put(requests.get(dep.data).content) + return question + + return Question(question) + + def import_data( + self, questions: List[Any], answers: List[Answer], embedding_datas: List[Any], model: Any + ): + if len(questions) != len(answers) or len(questions) != len(embedding_datas): + raise ParamError("Make sure that all parameters have the same length") + cache_datas = [] + + embedding_datas = [ + normalize(embedding_data) for embedding_data in embedding_datas + ] + + for i, embedding_data in enumerate(embedding_datas): + if self.o is not None: + ans = self._process_answer_data(answers[i]) + else: + ans = answers[i] + + question = questions[i] + embedding_data = embedding_data.astype("float32") + cache_datas.append([ans, question, embedding_data, model]) + + ids = self.s.batch_insert(cache_datas) + logging.info('ids: {}'.format(ids)) + self.v.mul_add( + [ + 
VectorData(id=ids[i], data=embedding_data) + for i, embedding_data in enumerate(embedding_datas) + ], + model + + ) + + def get_scalar_data(self, res_data, **kwargs) -> Optional[CacheData]: + cache_data = self.s.get_data_by_id(res_data[1]) + if cache_data is None: + return None + return cache_data + + def update_hit_count(self, primary_id, **kwargs): + self.s.update_hit_count_by_id(primary_id) + + def hit_cache_callback(self, res_data, **kwargs): + self.eviction_base.get(res_data[1]) + + def search(self, embedding_data, **kwargs): + model = kwargs.pop("model", None) + embedding_data = normalize(embedding_data) + top_k = kwargs.get("top_k", -1) + return self.v.search(data=embedding_data, top_k=top_k, model=model) + + def delete(self, id_list, **kwargs): + model = kwargs.pop("model", None) + try: + v_delete_count = self.v.delete(ids=id_list, model=model) + except Exception as e: + return {'status': 'failed', 'milvus': 'delete milvus data failed, please check! e: {}'.format(e), + 'mysql': 'unexecuted'} + try: + s_delete_count = self.s.mark_deleted(id_list) + except Exception as e: + return {'status': 'failed', 'milvus': 'success', + 'mysql': 'delete mysql data failed, please check! e: {}'.format(e)} + + return {'status': 'success', 'milvus': 'delete_count: '+str(v_delete_count), + 'mysql': 'delete_count: '+str(s_delete_count)} + + def create_index(self, model, **kwargs): + return self.v.create(model) + + def truncate(self, model_name): + # drop vector base data + try: + vector_resp = self.v.rebuild_col(model_name) + except Exception as e: + return {'status': 'failed', 'VectorDB': 'truncate VectorDB data failed, please check! e: {}'.format(e), + 'ScalarDB': 'unexecuted'} + if vector_resp: + return {'status': 'failed', 'VectorDB': vector_resp, 'ScalarDB': 'unexecuted'} + # drop scalar base data + try: + delete_count = self.s.model_deleted(model_name) + except Exception as e: + return {'status': 'failed', 'VectorDB': 'rebuild', + 'ScalarDB': 'truncate scalar data failed, please check! 
e: {}'.format(e)} + return {'status': 'success', 'VectorDB': 'rebuild', 'ScalarDB': 'delete_count: ' + str(delete_count)} + + def flush(self): + self.s.flush() + self.v.flush() + + def close(self): + self.s.close() + self.v.close() + + +# if __name__ == '__main__': +# from modelcache.manager import CacheBase, VectorBase, get_data_manager +# data_manager = get_data_manager(CacheBase('mysql'), VectorBase('milvus', dimension=128)) +# data_manager.save('hello', 'hi', np.random.random((128,)).astype('float32'), model='gptcode_6b') diff --git a/modelcache/manager_mm/data_manager_mm.py b/modelcache/manager_mm/data_manager_mm.py new file mode 100644 index 0000000..d6637ea --- /dev/null +++ b/modelcache/manager_mm/data_manager_mm.py @@ -0,0 +1,291 @@ +# -*- coding: utf-8 -*- +import logging +import time +import requests +import pickle +import numpy as np +import cachetools +from abc import abstractmethod, ABCMeta +from typing import List, Any, Optional, Union +from modelcache.manager.scalar_data.base import ( + CacheStorage, + CacheData, + DataType, + Answer, + Question +) +from modelcache.utils.error import CacheError, ParamError +from modelcache.manager.vector_data.base import VectorBase, VectorData +from modelcache.manager.object_data.base import ObjectBase +from modelcache.manager.eviction import EvictionBase +from modelcache.manager.eviction_manager import EvictionManager +from modelcache.utils.log import modelcache_log + + +class DataManager(metaclass=ABCMeta): + """DataManager manage the cache data, including save and search""" + + @abstractmethod + def save(self, question, answer, embedding_data, **kwargs): + pass + + @abstractmethod + def save_query_resp(self, query_resp_dict, **kwargs): + pass + + @abstractmethod + def import_data( + self, questions: List[Any], answers: List[Any], embedding_datas: List[Any], model:Any + ): + pass + + @abstractmethod + def get_scalar_data(self, res_data, **kwargs) -> CacheData: + pass + + @abstractmethod + def update_hit_count(self, primary_id, **kwargs): + pass + + def hit_cache_callback(self, res_data, **kwargs): + pass + + @abstractmethod + def search(self, embedding_data, **kwargs): + pass + + @abstractmethod + def delete(self, id_list, **kwargs): + pass + + def truncate(self, model_name): + pass + + def flush(self): + pass + + @abstractmethod + def close(self): + pass + + +class MapDataManager(DataManager): + def __init__(self, data_path, max_size, get_data_container=None): + if get_data_container is None: + self.data = cachetools.LRUCache(max_size) + else: + self.data = get_data_container(max_size) + self.data_path = data_path + self.init() + + def init(self): + try: + with open(self.data_path, "rb") as f: + self.data = pickle.load(f) + except FileNotFoundError: + return + except PermissionError: + raise CacheError( # pylint: disable=W0707 + f"You don't have permission to access this file <{self.data_path}>." 
+ ) + + def save(self, question, answer, embedding_data, **kwargs): + if isinstance(question, Question): + question = question.content + self.data[embedding_data] = (question, answer, embedding_data) + + def save_query_resp(self, query_resp_dict, **kwargs): + pass + + def import_data( + self, questions: List[Any], answers: List[Any], embedding_datas: List[Any], model: Any + ): + if len(questions) != len(answers) or len(questions) != len(embedding_datas): + raise ParamError("Make sure that all parameters have the same length") + for i, embedding_data in enumerate(embedding_datas): + self.data[embedding_data] = (questions[i], answers[i], embedding_datas[i]) + + def get_scalar_data(self, res_data, **kwargs) -> CacheData: + return CacheData(question=res_data[0], answers=res_data[1]) + + def update_hit_count(self, primary_id, **kwargs): + pass + + def search(self, embedding_data, **kwargs): + try: + return [self.data[embedding_data]] + except KeyError: + return [] + + def delete(self, id_list, **kwargs): + pass + + def truncate(self, model_name): + pass + + def flush(self): + try: + with open(self.data_path, "wb") as f: + pickle.dump(self.data, f) + except PermissionError: + modelcache_log.error( + "You don't have permission to access this file %s.", self.data_path + ) + + def close(self): + self.flush() + + +def normalize(vec): + magnitude = np.linalg.norm(vec) + normalized_v = vec / magnitude + return normalized_v + + +class SSDataManager(DataManager): + def __init__( + self, + s: CacheStorage, + v: VectorBase, + o: Optional[ObjectBase], + max_size, + clean_size, + policy="LRU", + ): + self.max_size = max_size + self.clean_size = clean_size + self.s = s + self.v = v + self.o = o + + def save(self, question, answer, embedding_data, **kwargs): + model = kwargs.pop("model", None) + self.import_data([question], [answer], [embedding_data], model) + + def save_query_resp(self, query_resp_dict, **kwargs): + save_query_start_time = time.time() + self.s.insert_query_resp(query_resp_dict, **kwargs) + save_query_delta_time = '{}s'.format(round(time.time() - save_query_start_time, 2)) + + def _process_answer_data(self, answers: Union[Answer, List[Answer]]): + if isinstance(answers, Answer): + answers = [answers] + new_ans = [] + for ans in answers: + if ans.answer_type != DataType.STR: + new_ans.append(Answer(self.o.put(ans.answer), ans.answer_type)) + else: + new_ans.append(ans) + return new_ans + + def _process_question_data(self, question: Union[str, Question]): + if isinstance(question, Question): + if question.deps is None: + return question + + for dep in question.deps: + if dep.dep_type == DataType.IMAGE_URL: + dep.dep_type.data = self.o.put(requests.get(dep.data).content) + return question + + return Question(question) + + def import_data( + self, questions: List[Any], answers: List[Answer], embedding_datas: List[Any], model: Any + ): + if len(questions) != len(answers) or len(questions) != len(embedding_datas): + raise ParamError("Make sure that all parameters have the same length") + cache_datas = [] + + embedding_datas = [ + normalize(embedding_data) for embedding_data in embedding_datas + ] + + for i, embedding_data in enumerate(embedding_datas): + if self.o is not None: + ans = self._process_answer_data(answers[i]) + else: + ans = answers[i] + + question = questions[i] + embedding_data = embedding_data.astype("float32") + cache_datas.append([ans, question, embedding_data, model]) + + ids = self.s.batch_insert(cache_datas) + logging.info('ids: {}'.format(ids)) + self.v.mul_add( + [ + 
VectorData(id=ids[i], data=embedding_data) + for i, embedding_data in enumerate(embedding_datas) + ], + model + + ) + + def get_scalar_data(self, res_data, **kwargs) -> Optional[CacheData]: + cache_data = self.s.get_data_by_id(res_data[1]) + if cache_data is None: + return None + return cache_data + + def update_hit_count(self, primary_id, **kwargs): + self.s.update_hit_count_by_id(primary_id) + + def hit_cache_callback(self, res_data, **kwargs): + self.eviction_base.get(res_data[1]) + + def search(self, embedding_data, **kwargs): + model = kwargs.pop("model", None) + embedding_data = normalize(embedding_data) + top_k = kwargs.get("top_k", -1) + return self.v.search(data=embedding_data, top_k=top_k, model=model) + + def delete(self, id_list, **kwargs): + model = kwargs.pop("model", None) + try: + v_delete_count = self.v.delete(ids=id_list, model=model) + except Exception as e: + return {'status': 'failed', 'milvus': 'delete milvus data failed, please check! e: {}'.format(e), + 'mysql': 'unexecuted'} + try: + s_delete_count = self.s.mark_deleted(id_list) + except Exception as e: + return {'status': 'failed', 'milvus': 'success', + 'mysql': 'delete mysql data failed, please check! e: {}'.format(e)} + + return {'status': 'success', 'milvus': 'delete_count: '+str(v_delete_count), + 'mysql': 'delete_count: '+str(s_delete_count)} + + def create_index(self, model, **kwargs): + return self.v.create(model) + + def truncate(self, model_name): + # drop vector base data + try: + vector_resp = self.v.rebuild_col(model_name) + except Exception as e: + return {'status': 'failed', 'VectorDB': 'truncate VectorDB data failed, please check! e: {}'.format(e), + 'ScalarDB': 'unexecuted'} + if vector_resp: + return {'status': 'failed', 'VectorDB': vector_resp, 'ScalarDB': 'unexecuted'} + # drop scalar base data + try: + delete_count = self.s.model_deleted(model_name) + except Exception as e: + return {'status': 'failed', 'VectorDB': 'rebuild', + 'ScalarDB': 'truncate scalar data failed, please check! e: {}'.format(e)} + return {'status': 'success', 'VectorDB': 'rebuild', 'ScalarDB': 'delete_count: ' + str(delete_count)} + + def flush(self): + self.s.flush() + self.v.flush() + + def close(self): + self.s.close() + self.v.close() + + +# if __name__ == '__main__': +# from modelcache.manager import CacheBase, VectorBase, get_data_manager +# data_manager = get_data_manager(CacheBase('mysql'), VectorBase('milvus', dimension=128)) +# data_manager.save('hello', 'hi', np.random.random((128,)).astype('float32'), model='gptcode_6b') diff --git a/modelcache/manager_mm/eviction/__init__.py b/modelcache/manager_mm/eviction/__init__.py new file mode 100644 index 0000000..8ca7a3d --- /dev/null +++ b/modelcache/manager_mm/eviction/__init__.py @@ -0,0 +1,10 @@ +# -*- coding: utf-8 -*- +from modelcache.utils.lazy_import import LazyImport + +eviction_manager = LazyImport( + "eviction_manager", globals(), "modelcache.manager.eviction.manager" +) + + +def EvictionBase(name: str, **kwargs): + return eviction_manager.EvictionBase.get(name, **kwargs) diff --git a/modelcache/manager_mm/eviction/base.py b/modelcache/manager_mm/eviction/base.py new file mode 100644 index 0000000..352c9a3 --- /dev/null +++ b/modelcache/manager_mm/eviction/base.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +from abc import ABCMeta, abstractmethod +from typing import Any, List + + +class EvictionBase(metaclass=ABCMeta): + """ + Eviction base. 
+ """ + + @abstractmethod + def put(self, objs: List[Any]): + pass + + @abstractmethod + def get(self, obj: Any): + pass + + @property + @abstractmethod + def policy(self) -> str: + pass diff --git a/modelcache/manager_mm/eviction/manager.py b/modelcache/manager_mm/eviction/manager.py new file mode 100644 index 0000000..61579f0 --- /dev/null +++ b/modelcache/manager_mm/eviction/manager.py @@ -0,0 +1,25 @@ +# -*- coding: utf-8 -*- +from typing import Callable, List, Any +from modelcache.utils.error import NotFoundError + + +class EvictionBase: + """ + EvictionBase to evict the cache data. + """ + + def __init__(self): + raise EnvironmentError( + "EvictionBase is designed to be instantiated, " + "please using the `EvictionBase.get(name, policy, maxsize, clean_size)`." + ) + + @staticmethod + def get(name: str, policy: str, maxsize: int, clean_size: int, on_evict: Callable[[List[Any]], None], **kwargs): + if name in "memory": + from modelcache.manager.eviction.memory_cache import MemoryCacheEviction + + eviction_base = MemoryCacheEviction(policy, maxsize, clean_size, on_evict, **kwargs) + else: + raise NotFoundError("eviction base", name) + return eviction_base diff --git a/modelcache/manager_mm/eviction/memory_cache.py b/modelcache/manager_mm/eviction/memory_cache.py new file mode 100644 index 0000000..3cb487f --- /dev/null +++ b/modelcache/manager_mm/eviction/memory_cache.py @@ -0,0 +1,44 @@ +# -*- coding: utf-8 -*- +from typing import Any, Callable, List +import cachetools + +from modelcache.manager.eviction.base import EvictionBase + + +def popitem_wrapper(func, wrapper_func, clean_size): + def wrapper(*args, **kwargs): + keys = [] + try: + keys = [func(*args, **kwargs)[0] for _ in range(clean_size)] + except KeyError: + pass + wrapper_func(keys) + return wrapper + + +class MemoryCacheEviction(EvictionBase): + def __init__(self, policy: str, maxsize: int, clean_size: int, on_evict: Callable[[List[Any]], None], **kwargs): + self._policy = policy.upper() + if self._policy == "LRU": + self._cache = cachetools.LRUCache(maxsize=maxsize, **kwargs) + elif self._policy == "LFU": + self._cache = cachetools.LFUCache(maxsize=maxsize, **kwargs) + elif self._policy == "FIFO": + self._cache = cachetools.FIFOCache(maxsize=maxsize, **kwargs) + elif self._policy == "RR": + self._cache = cachetools.RRCache(maxsize=maxsize, **kwargs) + else: + raise ValueError(f"Unknown policy {policy}") + + self._cache.popitem = popitem_wrapper(self._cache.popitem, on_evict, clean_size) + + def put(self, objs: List[Any]): + for obj in objs: + self._cache[obj] = True + + def get(self, obj: Any): + return self._cache.get(obj) + + @property + def policy(self) -> str: + return self._policy diff --git a/modelcache/manager_mm/eviction_manager.py b/modelcache/manager_mm/eviction_manager.py new file mode 100644 index 0000000..0a6a406 --- /dev/null +++ b/modelcache/manager_mm/eviction_manager.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +class EvictionManager: + """ + EvictionManager to manager the eviction policy. + + :param scalar_storage: CacheStorage to manager the scalar data. + :type scalar_storage: :class:`CacheStorage` + :param vector_base: VectorBase to manager the vector data. 
+ :type vector_base: :class:`VectorBase` + """ + + MAX_MARK_COUNT = 5000 + MAX_MARK_RATE = 0.1 + BATCH_SIZE = 100000 + REBUILD_CONDITION = 5 + + def __init__(self, scalar_storage, vector_base): + self._scalar_storage = scalar_storage + self._vector_base = vector_base + self.delete_count = 0 + + def check_evict(self): + mark_count = self._scalar_storage.count(state=-1) + all_count = self._scalar_storage.count(is_all=True) + if ( + mark_count > self.MAX_MARK_COUNT + or mark_count / all_count > self.MAX_MARK_RATE + ): + return True + return False + + def delete(self): + mark_ids = self._scalar_storage.get_ids(deleted=True) + self._scalar_storage.clear_deleted_data() + self._vector_base.delete(mark_ids) + self.delete_count += 1 + if self.delete_count >= self.REBUILD_CONDITION: + self.rebuild() + + def rebuild(self): + self._scalar_storage.clear_deleted_data() + ids = self._scalar_storage.get_ids(deleted=False) + self._vector_base.rebuild(ids) + self.delete_count = 0 + + def soft_evict(self, marked_keys): + self._scalar_storage.mark_deleted(marked_keys) diff --git a/modelcache/manager_mm/factory.py b/modelcache/manager_mm/factory.py new file mode 100644 index 0000000..08baf6b --- /dev/null +++ b/modelcache/manager_mm/factory.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +from typing import Union, Callable +from modelcache.manager import CacheBase, VectorBase, ObjectBase +from modelcache.manager.data_manager import SSDataManager, MapDataManager + + +def get_data_manager( + cache_base: Union[CacheBase, str] = None, + vector_base: Union[VectorBase, str] = None, + object_base: Union[ObjectBase, str] = None, + max_size: int = 1000, + clean_size: int = None, + eviction: str = "LRU", + data_path: str = "data_map.txt", + get_data_container: Callable = None, +): + if not cache_base and not vector_base: + return MapDataManager(data_path, max_size, get_data_container) + + if isinstance(cache_base, str): + cache_base = CacheBase(name=cache_base) + if isinstance(vector_base, str): + vector_base = VectorBase(name=vector_base) + if isinstance(object_base, str): + object_base = ObjectBase(name=object_base) + assert cache_base and vector_base + return SSDataManager(cache_base, vector_base, object_base, max_size, clean_size, eviction) + + +def get_data_manager_mm( + cache_base: Union[CacheBase, str] = None, + vector_base: Union[VectorBase, str] = None, + object_base: Union[ObjectBase, str] = None, + max_size: int = 1000, + clean_size: int = None, + eviction: str = "LRU", + data_path: str = "data_map.txt", + get_data_container: Callable = None, +): + if not cache_base and not vector_base: + return MapDataManager(data_path, max_size, get_data_container) + + if isinstance(cache_base, str): + cache_base = CacheBase(name=cache_base) + if isinstance(vector_base, str): + vector_base = VectorBase(name=vector_base) + if isinstance(object_base, str): + object_base = ObjectBase(name=object_base) + assert cache_base and vector_base + return SSDataManager(cache_base, vector_base, object_base, max_size, clean_size, eviction) diff --git a/modelcache/manager_mm/object_data/__init__.py b/modelcache/manager_mm/object_data/__init__.py new file mode 100644 index 0000000..f1186c0 --- /dev/null +++ b/modelcache/manager_mm/object_data/__init__.py @@ -0,0 +1,9 @@ +# -*- coding: utf-8 -*- +from modelcache.utils.lazy_import import LazyImport +object_manager = LazyImport( + "object_manager", globals(), "modelcache.manager.object_data.manager" +) + + +def ObjectBase(name: str, **kwargs): + return object_manager.ObjectBase.get(name, 
**kwargs) diff --git a/modelcache/manager_mm/object_data/base.py b/modelcache/manager_mm/object_data/base.py new file mode 100644 index 0000000..9ed00cf --- /dev/null +++ b/modelcache/manager_mm/object_data/base.py @@ -0,0 +1,25 @@ +# -*- coding: utf-8 -*- +from abc import ABC, abstractmethod +from typing import Any, List + + +class ObjectBase(ABC): + """ + Object storage base. + """ + + @abstractmethod + def put(self, obj: Any) -> str: + pass + + @abstractmethod + def get(self, obj: str) -> Any: + pass + + @abstractmethod + def get_access_link(self, obj: str) -> str: + pass + + @abstractmethod + def delete(self, to_delete: List[str]): + pass diff --git a/modelcache/manager_mm/scalar_data/__init__.py b/modelcache/manager_mm/scalar_data/__init__.py new file mode 100644 index 0000000..b63c430 --- /dev/null +++ b/modelcache/manager_mm/scalar_data/__init__.py @@ -0,0 +1,9 @@ +# -*- coding: utf-8 -*- +from modelcache.utils.lazy_import import LazyImport +scalar_manager = LazyImport( + "scalar_manager", globals(), "modelcache.manager.scalar_data.manager" +) + + +def CacheBase(name: str, **kwargs): + return scalar_manager.CacheBase.get(name, **kwargs) diff --git a/modelcache/manager_mm/scalar_data/base.py b/modelcache/manager_mm/scalar_data/base.py new file mode 100644 index 0000000..fd8bb50 --- /dev/null +++ b/modelcache/manager_mm/scalar_data/base.py @@ -0,0 +1,134 @@ +# -*- coding: utf-8 -*- +from abc import ABCMeta, abstractmethod +from dataclasses import dataclass +from typing import Union, Dict, List, Optional, Any +from enum import IntEnum +import numpy as np + + +class DataType(IntEnum): + STR = 0 + IMAGE_BASE64 = 1 + IMAGE_URL = 2 + + +@dataclass +class QuestionDep: + """ + QuestionDep + """ + + name: str + data: str + dep_type: int = DataType.STR + + @classmethod + def from_dict(cls, d: Dict): + return cls( + name=d["name"], + data=d["data"], + dep_type=d["dep_type"] + ) + + +@dataclass +class Question: + """ + Question + """ + + content: str + deps: Optional[List[QuestionDep]] = None + + @classmethod + def from_dict(cls, d: Dict): + deps = [] + for dep in d["deps"]: + deps.append(QuestionDep.from_dict(dep)) + return cls(d["content"], deps) + + +@dataclass +class Answer: + """ + data_type: + 0: str + 1: base64 image + """ + + answer: Any + answer_type: int = DataType.STR + + +@dataclass +class CacheData: + """ + CacheData + """ + + question: Union[str, Question] + answers: List[Answer] + embedding_data: Optional[np.ndarray] = None + + def __init__(self, question, answers, embedding_data=None): + self.question = question + self.answers = [] + if isinstance(answers, (str, Answer)): + answers = [answers] + for data in answers: + if isinstance(data, (list, tuple)): + self.answers.append(Answer(*data)) + elif isinstance(data, Answer): + self.answers.append(data) + else: + self.answers.append(Answer(answer=data)) + self.embedding_data = embedding_data + + +class CacheStorage(metaclass=ABCMeta): + """ + BaseStorage for scalar data. 
+ """ + + @abstractmethod + def create(self): + pass + + @abstractmethod + def batch_insert(self, all_data: List[CacheData]): + pass + + @abstractmethod + def insert_query_resp(self, query_resp, **kwargs): + pass + + @abstractmethod + def get_data_by_id(self, key): + pass + + @abstractmethod + def mark_deleted(self, keys): + pass + + @abstractmethod + def model_deleted(self, model_name): + pass + + @abstractmethod + def clear_deleted_data(self): + pass + + @abstractmethod + def get_ids(self, deleted=True): + pass + + @abstractmethod + def count(self): + pass + + def flush(self): + pass + + @abstractmethod + def close(self): + pass diff --git a/modelcache/manager_mm/scalar_data/manager.py b/modelcache/manager_mm/scalar_data/manager.py new file mode 100644 index 0000000..4c02c45 --- /dev/null +++ b/modelcache/manager_mm/scalar_data/manager.py @@ -0,0 +1,32 @@ +# -*- coding: utf-8 -*- +from modelcache.utils import import_sql_client +from modelcache.utils.error import NotFoundError + +SQL_URL = {"sqlite": "./sqlite.db"} + + +class CacheBase: + """ + CacheBase to manager the cache storage. + """ + + def __init__(self): + raise EnvironmentError( + "CacheBase is designed to be instantiated, please using the `CacheBase.get(name)`." + ) + + @staticmethod + def get(name, **kwargs): + + if name in ["mysql", "oceanbase"]: + from modelcache.manager.scalar_data.sql_storage import SQLStorage + config = kwargs.get("config") + import_sql_client(name) + cache_base = SQLStorage(db_type=name, config=config) + elif name == 'sqlite': + from modelcache.manager.scalar_data.sql_storage_sqlite import SQLStorage + sql_url = kwargs.get("sql_url", SQL_URL[name]) + cache_base = SQLStorage(db_type=name, url=sql_url) + else: + raise NotFoundError("cache store", name) + return cache_base diff --git a/modelcache/manager_mm/scalar_data/sql_storage.py b/modelcache/manager_mm/scalar_data/sql_storage.py new file mode 100644 index 0000000..503217a --- /dev/null +++ b/modelcache/manager_mm/scalar_data/sql_storage.py @@ -0,0 +1,174 @@ +# -*- coding: utf-8 -*- +import os +import time + +import pymysql +import json +import base64 +from typing import List +from modelcache.manager.scalar_data.base import CacheStorage, CacheData +from DBUtils.PooledDB import PooledDB + + +class SQLStorage(CacheStorage): + def __init__( + self, + db_type: str = "mysql", + config=None + ): + + self.host = config.get('mysql', 'host') + self.port = int(config.get('mysql', 'port')) + self.username = config.get('mysql', 'username') + self.password = config.get('mysql', 'password') + self.database = config.get('mysql', 'database') + self.pool = PooledDB( + creator=pymysql, + host=self.host, + user=self.username, + password=self.password, + port=self.port, + database=self.database + ) + + def create(self): + pass + + def _insert(self, data: List): + answer = data[0] + question = data[1] + embedding_data = data[2] + model = data[3] + answer_type = 0 + embedding_data = embedding_data.tobytes() + + table_name = "cache_codegpt_answer" + insert_sql = "INSERT INTO {} (question, answer, answer_type, model, embedding_data) VALUES (%s, %s, %s, %s, _binary%s)".format(table_name) + + conn = self.pool.connection() + try: + with conn.cursor() as cursor: + # 执行插入数据操作 + values = (question, answer, answer_type, model, embedding_data) + cursor.execute(insert_sql, values) + conn.commit() + id = cursor.lastrowid + finally: + # 关闭连接,将连接返回给连接池 + conn.close() + return id + + def batch_insert(self, all_data: List[CacheData]): + ids = [] + for data in all_data: + 
ids.append(self._insert(data)) + return ids + + def insert_query_resp(self, query_resp, **kwargs): + error_code = query_resp.get('errorCode') + error_desc = query_resp.get('errorDesc') + cache_hit = query_resp.get('cacheHit') + model = kwargs.get('model') + query = kwargs.get('query') + delta_time = kwargs.get('delta_time') + hit_query = query_resp.get('hit_query') + answer = query_resp.get('answer') + + if isinstance(hit_query, list): + hit_query = json.dumps(hit_query, ensure_ascii=False) + + table_name = "modelcache_query_log" + insert_sql = "INSERT INTO {} (error_code, error_desc, cache_hit, model, query, delta_time, hit_query, answer) VALUES (%s, %s, %s, %s, %s, %s, %s, %s)".format(table_name) + conn = self.pool.connection() + try: + with conn.cursor() as cursor: + # 执行插入数据操作 + values = (error_code, error_desc, cache_hit, model, query, delta_time, hit_query, answer) + cursor.execute(insert_sql, values) + conn.commit() + finally: + # 关闭连接,将连接返回给连接池 + conn.close() + + def get_data_by_id(self, key: int): + table_name = "cache_codegpt_answer" + query_sql = "select question, answer, embedding_data, model from {} where id={}".format(table_name, key) + conn_start = time.time() + conn = self.pool.connection() + + search_start = time.time() + try: + with conn.cursor() as cursor: + # 执行数据库操作 + cursor.execute(query_sql) + resp = cursor.fetchone() + finally: + # 关闭连接,将连接返回给连接池 + conn.close() + + if resp is not None and len(resp) == 4: + return resp + else: + return None + + def update_hit_count_by_id(self, primary_id: int): + table_name = "cache_codegpt_answer" + update_sql = "UPDATE {} SET hit_count = hit_count+1 WHERE id={}".format(table_name, primary_id) + conn = self.pool.connection() + + # 使用连接执行更新数据操作 + try: + with conn.cursor() as cursor: + # 执行更新数据操作 + cursor.execute(update_sql) + conn.commit() + finally: + # 关闭连接,将连接返回给连接池 + conn.close() + + def get_ids(self, deleted=True): + pass + + def mark_deleted(self, keys): + table_name = "cache_codegpt_answer" + delete_sql = "Delete from {} WHERE id in ({})".format(table_name, ",".join([str(i) for i in keys])) + + # 从连接池中获取连接 + conn = self.pool.connection() + try: + with conn.cursor() as cursor: + # 执行删除数据操作 + cursor.execute(delete_sql) + delete_count = cursor.rowcount + conn.commit() + finally: + # 关闭连接,将连接返回给连接池 + conn.close() + return delete_count + + def model_deleted(self, model_name): + table_name = "cache_codegpt_answer" + delete_sql = "Delete from {} WHERE model='{}'".format(table_name, model_name) + conn = self.pool.connection() + # 使用连接执行删除数据操作 + try: + with conn.cursor() as cursor: + # 执行删除数据操作 + resp = cursor.execute(delete_sql) + conn.commit() + finally: + # 关闭连接,将连接返回给连接池 + conn.close() + return resp + + def clear_deleted_data(self): + pass + + def count(self, state: int = 0, is_all: bool = False): + pass + + def close(self): + pass + + def count_answers(self): + pass diff --git a/modelcache/manager_mm/scalar_data/sql_storage_sqlite.py b/modelcache/manager_mm/scalar_data/sql_storage_sqlite.py new file mode 100644 index 0000000..495fbf7 --- /dev/null +++ b/modelcache/manager_mm/scalar_data/sql_storage_sqlite.py @@ -0,0 +1,284 @@ +# -*- coding: utf-8 -*- +import os +import time + +import pymysql +import json +import base64 +from typing import List +from modelcache.manager.scalar_data.base import CacheStorage, CacheData +import sqlite3 + + +# def insert_single_data(conn, sql, data_tuple): +# cur = conn.cursor() +# try: +# cur.execute(sql, data_tuple) +# conn.commit() +# id = cur.lastrowid +# # print('id: {}'.format(id)) +# return id +# 
except Exception as e: +# print(e) +# conn.rollback() +# if cur: +# cur.close() +# +# +# def excute_sql(conn, sql): +# cur = conn.cursor() +# try: +# cur.execute(sql) +# conn.commit() +# except Exception as e: +# print(e) +# conn.rollback() +# if cur: +# cur.close() +# +# +# def excute_delete_sql(conn, sql): +# cur = conn.cursor() +# try: +# cur.execute(sql) +# row_count = cur.rowcount +# conn.commit() +# except Exception as e: +# print(e) +# conn.rollback() +# if cur: +# cur.close() +# return row_count +# +# +# def query_fetch_one_data(conn, sql): +# cursor = conn.cursor() +# try: +# cursor.execute(sql) +# except Exception as e: +# print(e) +# conn.rollback() +# rows = cursor.fetchone() +# if cursor: +# cursor.close() +# return rows +# +# +# def close(conn): +# if conn: +# conn.close() + + +class SQLStorage(CacheStorage): + def __init__( + self, + db_type: str = "mysql", + config=None, + url="./sqlite.db" + ): + self._url = url + # self._engine = sqlite3.connect(url) + self.create() + + def create(self): + # answer_table_sql = """CREATE TABLE IF NOT EXISTS `modelcache_llm_answer` ( + # `id` bigint(20) NOT NULL AUTO_INCREMENT comment '主键', + # `gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP comment '创建时间', + # `gmt_modified` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP comment '修改时间', + # `question` text NOT NULL comment 'question', + # `answer` text NOT NULL comment 'answer', + # `answer_type` int(11) NOT NULL comment 'answer_type', + # `hit_count` int(11) NOT NULL DEFAULT '0' comment 'hit_count', + # `model` varchar(1000) NOT NULL comment 'model', + # `embedding_data` blob NOT NULL comment 'embedding_data', + # PRIMARY KEY(`id`) + # ) AUTO_INCREMENT = 1 DEFAULT CHARSET = utf8mb4 COMMENT = 'modelcache_llm_answer'; + # """ + answer_table_sql = """CREATE TABLE IF NOT EXISTS modelcache_llm_answer ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + gmt_create TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, + gmt_modified TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, + question TEXT NOT NULL, + answer TEXT NOT NULL, + answer_type INTEGER NOT NULL, + hit_count INTEGER NOT NULL DEFAULT 0, + model VARCHAR(1000) NOT NULL, + embedding_data BLOB NOT NULL + ); + """ + + # log_table_sql = """CREATE TABLE IF NOT EXISTS `modelcache_query_log` ( + # `id` bigint(20) NOT NULL AUTO_INCREMENT comment '主键', + # `gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP comment '创建时间', + # `gmt_modified` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP comment '修改时间', + # `error_code` int(11) NOT NULL comment 'errorCode', + # `error_desc` varchar(1000) NOT NULL comment 'errorDesc', + # `cache_hit` varchar(100) NOT NULL comment 'cacheHit', + # `delta_time` float NOT NULL comment 'delta_time', + # `model` varchar(1000) NOT NULL comment 'model', + # `query` text NOT NULL comment 'query', + # `hit_query` text NOT NULL comment 'hitQuery', + # `answer` text NOT NULL comment 'answer', + # PRIMARY KEY(`id`) + # ) AUTO_INCREMENT = 1 DEFAULT CHARSET = utf8mb4 COMMENT = 'modelcache_query_log'; + # """ + log_table_sql = """CREATE TABLE IF NOT EXISTS modelcache_query_log ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + gmt_create TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, + gmt_modified TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, + error_code INTEGER NOT NULL, + error_desc VARCHAR(1000) NOT NULL, + cache_hit VARCHAR(100) NOT NULL, + delta_time REAL NOT NULL, + model VARCHAR(1000) NOT NULL, + query TEXT NOT NULL, + hit_query TEXT NOT NULL, + answer TEXT NOT NULL + ); + """ 
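+        # the SQLite DDL above mirrors the commented-out MySQL schema: modelcache_llm_answer
+        # caches question/answer rows, modelcache_query_log records per-request hit results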
+ + conn = sqlite3.connect(self._url) + try: + cursor = conn.cursor() + cursor.execute(answer_table_sql) + cursor.execute(log_table_sql) + conn.commit() + cursor.close() + conn.close() + finally: + conn.close() + + def _insert(self, data: List): + answer = data[0] + question = data[1] + embedding_data = data[2] + model = data[3] + answer_type = 0 + embedding_data = embedding_data.tobytes() + + table_name = "modelcache_llm_answer" + insert_sql = "INSERT INTO {} (question, answer, answer_type, model, embedding_data) VALUES (?, ?, ?, ?, ?)".format(table_name) + + conn = sqlite3.connect(self._url) + try: + cursor = conn.cursor() + values = (question, answer, answer_type, model, embedding_data) + cursor.execute(insert_sql, values) + conn.commit() + id = cursor.lastrowid + cursor.close() + conn.close() + finally: + conn.close() + return id + + def batch_insert(self, all_data: List[CacheData]): + ids = [] + for data in all_data: + ids.append(self._insert(data)) + return ids + + def insert_query_resp(self, query_resp, **kwargs): + error_code = query_resp.get('errorCode') + error_desc = query_resp.get('errorDesc') + cache_hit = query_resp.get('cacheHit') + model = kwargs.get('model') + query = kwargs.get('query') + delta_time = kwargs.get('delta_time') + hit_query = query_resp.get('hit_query') + answer = query_resp.get('answer') + + if isinstance(hit_query, list): + hit_query = json.dumps(hit_query, ensure_ascii=False) + + table_name = "modelcache_query_log" + insert_sql = "INSERT INTO {} (error_code, error_desc, cache_hit, model, query, delta_time, hit_query, answer) VALUES (%s, %s, %s, %s, %s, %s, %s, %s)".format(table_name) + + conn = sqlite3.connect(self._url) + try: + cursor = conn.cursor() + values = (error_code, error_desc, cache_hit, model, query, delta_time, hit_query, answer) + cursor.execute(insert_sql, values) + conn.commit() + cursor.close() + conn.close() + finally: + conn.close() + + def get_data_by_id(self, key: int): + table_name = "modelcache_llm_answer" + query_sql = "select question, answer, embedding_data, model from {} where id={}".format(table_name, key) + conn = sqlite3.connect(self._url) + try: + cursor = conn.cursor() + cursor.execute(query_sql) + resp = cursor.fetchone() + conn.commit() + cursor.close() + conn.close() + finally: + conn.close() + + if resp is not None and len(resp) == 4: + return resp + else: + return None + + def update_hit_count_by_id(self, primary_id: int): + table_name = "modelcache_llm_answer" + update_sql = "UPDATE {} SET hit_count = hit_count+1 WHERE id={}".format(table_name, primary_id) + + conn = sqlite3.connect(self._url) + try: + cursor = conn.cursor() + cursor.execute(update_sql) + conn.commit() + cursor.close() + conn.close() + finally: + # 关闭连接,将连接返回给连接池 + conn.close() + + def get_ids(self, deleted=True): + pass + + def mark_deleted(self, keys): + table_name = "modelcache_llm_answer" + delete_sql = "Delete from {} WHERE id in ({})".format(table_name, ",".join([str(i) for i in keys])) + conn = sqlite3.connect(self._url) + try: + cursor = conn.cursor() + cursor.execute(delete_sql) + delete_count = cursor.rowcount + conn.commit() + cursor.close() + conn.close() + finally: + conn.close() + return delete_count + + def model_deleted(self, model_name): + table_name = "modelcache_llm_answer" + delete_sql = "Delete from {} WHERE model='{}'".format(table_name, model_name) + conn = sqlite3.connect(self._url) + try: + cursor = conn.cursor() + resp = cursor.execute(delete_sql) + conn.commit() + cursor.close() + conn.close() + finally: + conn.close() + 
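+        # note: sqlite3's Cursor.execute returns the cursor itself, so resp is not an
+        # affected-row count here (unlike the pymysql backend, where it is)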
return resp + + def clear_deleted_data(self): + pass + + def count(self, state: int = 0, is_all: bool = False): + pass + + def close(self): + pass + + def count_answers(self): + pass diff --git a/modelcache/manager_mm/vector_data/__init__.py b/modelcache/manager_mm/vector_data/__init__.py new file mode 100644 index 0000000..1bf0642 --- /dev/null +++ b/modelcache/manager_mm/vector_data/__init__.py @@ -0,0 +1,10 @@ +# -*- coding: utf-8 -*- +from modelcache.utils.lazy_import import LazyImport + +vector_manager = LazyImport( + "vector_manager", globals(), "modelcache.manager_mm.vector_data.manager" +) + + +def VectorBase(name: str, **kwargs): + return vector_manager.VectorBase.get(name, **kwargs) diff --git a/modelcache/manager_mm/vector_data/base.py b/modelcache/manager_mm/vector_data/base.py new file mode 100644 index 0000000..7bdf12a --- /dev/null +++ b/modelcache/manager_mm/vector_data/base.py @@ -0,0 +1,41 @@ +# -*- coding: utf-8 -*- +from abc import ABC, abstractmethod +import numpy as np +from typing import List +from dataclasses import dataclass + + +@dataclass +class VectorData: + id: int + data: np.ndarray + + +class VectorBase(ABC): + """VectorBase: base vector store interface""" + + @abstractmethod + def mul_add(self, datas: List[VectorData], model=None): + pass + + @abstractmethod + def search(self, data: np.ndarray, top_k: int, model): + pass + + @abstractmethod + def rebuild(self, ids=None) -> bool: + pass + + @abstractmethod + def delete(self, ids) -> bool: + pass + + @abstractmethod + def rebuild_col(self, model): + pass + + def flush(self): + pass + + def close(self): + pass diff --git a/modelcache/manager_mm/vector_data/faiss.py b/modelcache/manager_mm/vector_data/faiss.py new file mode 100644 index 0000000..f035ded --- /dev/null +++ b/modelcache/manager_mm/vector_data/faiss.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +import os +from typing import List +import numpy as np +from modelcache.manager.vector_data.base import VectorBase, VectorData +from modelcache.utils import import_faiss +import_faiss() +import faiss # pylint: disable=C0413 + + +class Faiss(VectorBase): + def __init__(self, index_file_path, dimension, top_k): + self._index_file_path = index_file_path + self._dimension = dimension + self._index = faiss.index_factory(self._dimension, "IDMap,Flat", faiss.METRIC_L2) + self._top_k = top_k + if os.path.isfile(index_file_path): + self._index = faiss.read_index(index_file_path) + + def mul_add(self, datas: List[VectorData], model=None): + data_array, id_array = map(list, zip(*((data.data, data.id) for data in datas))) + np_data = np.array(data_array).astype("float32") + ids = np.array(id_array) + self._index.add_with_ids(np_data, ids) + + def search(self, data: np.ndarray, top_k: int = -1, model=None): + if self._index.ntotal == 0: + return None + if top_k == -1: + top_k = self._top_k + np_data = np.array(data).astype("float32").reshape(1, -1) + dist, ids = self._index.search(np_data, top_k) + ids = [int(i) for i in ids[0]] + return list(zip(dist[0], ids)) + + def rebuild_col(self, ids=None): + return True + + def rebuild(self, ids=None): + return True + + def delete(self, ids): + ids_to_remove = np.array(ids) + self._index.remove_ids(faiss.IDSelectorBatch(ids_to_remove.size, faiss.swig_ptr(ids_to_remove))) + + def flush(self): + faiss.write_index(self._index, self._index_file_path) + + def close(self): + self.flush() + + def count(self): + return self._index.ntotal diff --git a/modelcache/manager_mm/vector_data/manager.py 
b/modelcache/manager_mm/vector_data/manager.py new file mode 100644 index 0000000..6049107 --- /dev/null +++ b/modelcache/manager_mm/vector_data/manager.py @@ -0,0 +1,135 @@ +# -*- coding: utf-8 -*- +from modelcache.utils.error import NotFoundError, ParamError + +TOP_K = 1 +FAISS_INDEX_PATH = "faiss.index" +DIMENSION = 0 +MILVUS_HOST = "localhost" +MILVUS_PORT = 19530 +MILVUS_USER = "" +MILVUS_PSW = "" +MILVUS_SECURE = False +MILVUS_INDEX_PARAMS = { + "metric_type": "L2", + "index_type": "HNSW", + "params": {"M": 8, "efConstruction": 64}, +} + +COLLECTION_NAME = "modelcache" + + +class VectorBase: + """ + VectorBase to manager the vector base. + """ + + def __init__(self): + raise EnvironmentError( + "VectorBase is designed to be instantiated, please using the `VectorBase.get(name)`." + ) + + @staticmethod + def check_dimension(dimension): + if dimension <= 0: + raise ParamError( + f"the dimension should be greater than zero, current value: {dimension}." + ) + + @staticmethod + def get(name, **kwargs): + top_k = kwargs.get("top_k", TOP_K) + if name == "milvus": + from modelcache.manager.vector_data.milvus import Milvus + milvus_config = kwargs.get("milvus_config") + dimension = kwargs.get("dimension", DIMENSION) + VectorBase.check_dimension(dimension) + host = milvus_config.get('milvus', 'host') + port = milvus_config.get('milvus', 'port') + user = milvus_config.get('milvus', 'user') + password = milvus_config.get('milvus', 'password') + + secure = kwargs.get("secure", MILVUS_SECURE) + collection_name = kwargs.get("collection_name", COLLECTION_NAME) + index_params = kwargs.get("index_params", MILVUS_INDEX_PARAMS) + search_params = kwargs.get("search_params", None) + local_mode = kwargs.get("local_mode", False) + local_data = kwargs.get("local_data", "./milvus_data") + vector_base = Milvus( + host=host, + port=port, + user=user, + password=password, + secure=secure, + collection_name=collection_name, + dimension=dimension, + top_k=top_k, + index_params=index_params, + search_params=search_params, + local_mode=local_mode, + local_data=local_data + ) + elif name == "redis": + from modelcache.manager.vector_data.redis import RedisVectorStore + redis_config = kwargs.get("redis_config") + + mm_dimension = kwargs.get("mm_dimension", DIMENSION) + print('mm_dimension: {}'.format(mm_dimension)) + i_dimension = kwargs.get("i_dimension", DIMENSION) + t_dimension = kwargs.get("t_dimension", DIMENSION) + VectorBase.check_dimension(mm_dimension) + VectorBase.check_dimension(i_dimension) + VectorBase.check_dimension(t_dimension) + + host = redis_config.get('redis', 'host') + port = redis_config.get('redis', 'port') + user = redis_config.get('redis', 'user') + password = redis_config.get('redis', 'password') + namespace = kwargs.get("namespace", "") + # collection_name = kwargs.get("collection_name", COLLECTION_NAME) + + vector_base = RedisVectorStore( + host=host, + port=port, + username=user, + password=password, + namespace=namespace, + top_k=top_k, + mm_dimension=mm_dimension, + i_dimension=i_dimension, + t_dimension=t_dimension, + ) + elif name == "faiss": + from modelcache.manager.vector_data.faiss import Faiss + + dimension = kwargs.get("dimension", DIMENSION) + index_path = kwargs.pop("index_path", FAISS_INDEX_PATH) + VectorBase.check_dimension(dimension) + vector_base = Faiss( + index_file_path=index_path, dimension=dimension, top_k=top_k + ) + elif name == "chromadb": + from modelcache.manager.vector_data.chroma import Chromadb + + client_settings = kwargs.get("client_settings", None) + 
persist_directory = kwargs.get("persist_directory", None) + collection_name = kwargs.get("collection_name", COLLECTION_NAME) + vector_base = Chromadb( + client_settings=client_settings, + persist_directory=persist_directory, + collection_name=collection_name, + top_k=top_k, + ) + elif name == "hnswlib": + from modelcache.manager.vector_data.hnswlib_store import Hnswlib + + dimension = kwargs.get("dimension", DIMENSION) + index_path = kwargs.pop("index_path", "./hnswlib_index.bin") + max_elements = kwargs.pop("max_elements", 100000) + VectorBase.check_dimension(dimension) + vector_base = Hnswlib( + index_file_path=index_path, dimension=dimension, + top_k=top_k, max_elements=max_elements + ) + else: + raise NotFoundError("vector store", name) + return vector_base diff --git a/modelcache/manager_mm/vector_data/milvus.py b/modelcache/manager_mm/vector_data/milvus.py new file mode 100644 index 0000000..50d6ab1 --- /dev/null +++ b/modelcache/manager_mm/vector_data/milvus.py @@ -0,0 +1,189 @@ +# -*- coding: utf-8 -*- +import logging +from typing import List +from uuid import uuid4 +import numpy as np +from modelcache.utils import import_pymilvus +from modelcache.utils.log import modelcache_log +from modelcache.manager.vector_data.base import VectorBase, VectorData + + +import_pymilvus() + +from pymilvus import ( # pylint: disable=C0413 + connections, + utility, + FieldSchema, + DataType, + CollectionSchema, + Collection, + MilvusException, +) + + +class Milvus(VectorBase): + SEARCH_PARAM = { + "IVF_FLAT": {"metric_type": "L2", "params": {"nprobe": 10}}, + "IVF_SQ8": {"metric_type": "L2", "params": {"nprobe": 10}}, + "IVF_PQ": {"metric_type": "L2", "params": {"nprobe": 10}}, + "HNSW": {"metric_type": "L2", "params": {"ef": 10}}, + "RHNSW_FLAT": {"metric_type": "L2", "params": {"ef": 10}}, + "RHNSW_SQ": {"metric_type": "L2", "params": {"ef": 10}}, + "RHNSW_PQ": {"metric_type": "L2", "params": {"ef": 10}}, + "IVF_HNSW": {"metric_type": "L2", "params": {"nprobe": 10, "ef": 10}}, + "ANNOY": {"metric_type": "L2", "params": {"search_k": 10}}, + "AUTOINDEX": {"metric_type": "L2", "params": {}}, + } + + def __init__( + self, + host: str = "localhost", + port: str = "19530", + user: str = "", + password: str = "", + secure: bool = False, + collection_name: str = "modelcache", + dimension: int = 0, + top_k: int = 1, + index_params: dict = None, + search_params: dict = None, + local_mode: bool = False, + local_data: str = "./milvus_data" + ): + if dimension <= 0: + raise ValueError( + f"invalid `dim` param: {dimension} in the Milvus vector store." 
+ ) + self._local_mode = local_mode + self._local_data = local_data + self.dimension = dimension + self.top_k = top_k + self.index_params = index_params + if self._local_mode: + self._create_local(port, local_data) + self._connect(host, port, user, password, secure) + self.collection_name = collection_name + self.search_params = ( + search_params or self.SEARCH_PARAM[self.index_params["index_type"]] + ) + + def _connect(self, host, port, user, password, secure): + try: + i = [ + connections.get_connection_addr(x[0]) + for x in connections.list_connections() + ].index({"host": host, "port": port}) + self.alias = connections.list_connections()[i][0] + except ValueError: + # Connect to the Milvus instance using the passed in Environment variables + self.alias = uuid4().hex + connections.connect( + alias=self.alias, + host=host, + port=port, + user=user, # type: ignore + password=password, # type: ignore + secure=secure, + timeout=10 + ) + + def _create_collection(self, collection_name): + if not utility.has_collection(collection_name, using=self.alias): + schema = [ + FieldSchema( + name="id", + dtype=DataType.INT64, + is_primary=True, + auto_id=False, + ), + FieldSchema( + name="embedding", dtype=DataType.FLOAT_VECTOR, dim=self.dimension + ), + ] + schema = CollectionSchema(schema) + self.col = Collection( + collection_name, + schema=schema, + consistency_level="Session", + using=self.alias, + ) + else: + modelcache_log.warning("The %s collection already exists, and it will be used directly.", collection_name) + self.col = Collection( + collection_name, consistency_level="Session", using=self.alias + ) + + if len(self.col.indexes) == 0: + try: + modelcache_log.info("Attempting creation of Milvus index.") + self.col.create_index("embedding", index_params=self.index_params) + modelcache_log.info("Creation of Milvus index successful.") + except MilvusException as e: + modelcache_log.warning("Error with building index: %s, and attempting creation of default index.", e) + i_p = {"metric_type": "L2", "index_type": "AUTOINDEX", "params": {}} + self.col.create_index("embedding", index_params=i_p) + self.index_params = i_p + else: + self.index_params = self.col.indexes[0].to_dict()["index_param"] + + self.col.load() + + def _get_collection(self, collection_name): + self.col = Collection( + collection_name, consistency_level="Session", using=self.alias + ) + self.col.load() + + def mul_add(self, datas: List[VectorData], model=None): + collection_name_model = self.collection_name + '_' + model + self._create_collection(collection_name_model) + + data_array, id_array = map(list, zip(*((data.data, data.id) for data in datas))) + np_data = np.array(data_array).astype("float32") + entities = [id_array, np_data] + self.col.insert(entities) + + def search(self, data: np.ndarray, top_k: int = -1, model=None): + if top_k == -1: + top_k = self.top_k + collection_name_model = self.collection_name + '_' + model + self._create_collection(collection_name_model) + search_result = self.col.search( + data=data.reshape(1, -1).tolist(), + anns_field="embedding", + param=self.search_params, + limit=top_k, + ) + return list(zip(search_result[0].distances, search_result[0].ids)) + + def delete(self, ids, model=None): + collection_name_model = self.collection_name + '_' + model + self._get_collection(collection_name_model) + + del_ids = ",".join([str(x) for x in ids]) + resp = self.col.delete(f"id in [{del_ids}]") + delete_count = resp.delete_count + return delete_count + + def rebuild_col(self, model): + 
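+        # rebuild the per-model collection: bail out if it does not exist, otherwise drop it and recreate it empty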
collection_name_model = self.collection_name + '_' + model + + # if col exist, drop col + if not utility.has_collection(collection_name_model, using=self.alias): + return 'model collection not found, please check!' + utility.drop_collection(collection_name_model, using=self.alias) + try: + self._create_collection(collection_name_model) + except Exception as e: + logging.info('create_collection: {}'.format(e)) + + def rebuild(self, ids=None): # pylint: disable=unused-argument + self.col.compact() + + def flush(self): + self.col.flush(_async=True) + + def close(self): + self.flush() + if self._local_mode: + self._server.stop() diff --git a/modelcache/manager_mm/vector_data/redis.py b/modelcache/manager_mm/vector_data/redis.py new file mode 100644 index 0000000..8a8fb37 --- /dev/null +++ b/modelcache/manager_mm/vector_data/redis.py @@ -0,0 +1,141 @@ +# -*- coding: utf-8 -*- +from typing import List +import numpy as np +from redis.commands.search.indexDefinition import IndexDefinition, IndexType +from redis.commands.search.query import Query +from redis.commands.search.field import TagField, VectorField, NumericField +from redis.client import Redis + +from modelcache.manager.vector_data.base import VectorBase, VectorData +from modelcache.utils import import_redis +from modelcache.utils.log import modelcache_log +from modelcache.utils.index_util import get_index_name +from modelcache.utils.index_util import get_index_prefix +import_redis() + + +class RedisVectorStore(VectorBase): + def __init__( + self, + host: str = "localhost", + port: str = "6379", + username: str = "", + password: str = "", + dimension: int = 0, + top_k: int = 1, + namespace: str = "", + ): + if dimension <= 0: + raise ValueError( + f"invalid `dim` param: {dimension} in the Milvus vector store." 
+ ) + self._client = Redis( + host=host, port=int(port), username=username, password=password + ) + self.top_k = top_k + self.dimension = dimension + self.namespace = namespace + self.doc_prefix = f"{self.namespace}doc:" + + def _check_index_exists(self, index_name: str) -> bool: + """Check if Redis index exists.""" + try: + self._client.ft(index_name).info() + except: + modelcache_log.info("Index does not exist") + return False + modelcache_log.info("Index already exists") + return True + + def create_index(self, index_name, index_prefix): + dimension = self.dimension + print('dimension: {}'.format(dimension)) + if self._check_index_exists(index_name): + modelcache_log.info( + "The %s already exists, and it will be used directly", index_name + ) + return 'already_exists' + else: + id_field_name = "data_id" + embedding_field_name = "data_vector" + + id = NumericField(name=id_field_name) + embedding = VectorField(embedding_field_name, + "HNSW", { + "TYPE": "FLOAT32", + "DIM": dimension, + "DISTANCE_METRIC": "L2", + "INITIAL_CAP": 1000, + } + ) + fields = [id, embedding] + definition = IndexDefinition(prefix=[index_prefix], index_type=IndexType.HASH) + + # create Index + self._client.ft(index_name).create_index( + fields=fields, definition=definition + ) + return 'create_success' + + def mul_add(self, datas: List[VectorData], model=None): + # pipe = self._client.pipeline() + for data in datas: + id: int = data.id + embedding = data.data.astype(np.float32).tobytes() + id_field_name = "data_id" + embedding_field_name = "data_vector" + obj = {id_field_name: id, embedding_field_name: embedding} + index_prefix = get_index_prefix(model) + self._client.hset(f"{index_prefix}{id}", mapping=obj) + + def search(self, data: np.ndarray, top_k: int = -1, model=None): + index_name = get_index_name(model) + id_field_name = "data_id" + embedding_field_name = "data_vector" + + base_query = f'*=>[KNN 2 @{embedding_field_name} $vector AS distance]' + query = ( + Query(base_query) + .sort_by("distance") + .return_fields(id_field_name, "distance") + .dialect(2) + ) + + query_params = {"vector": data.astype(np.float32).tobytes()} + results = ( + self._client.ft(index_name) + .search(query, query_params=query_params) + .docs + ) + return [(float(result.distance), int(getattr(result, id_field_name))) for result in results] + + def rebuild(self, ids=None) -> bool: + pass + + def rebuild_col(self, model): + index_name_model = get_index_name(model) + if self._check_index_exists(index_name_model): + try: + self._client.ft(index_name_model).dropindex(delete_documents=True) + except Exception as e: + raise ValueError(str(e)) + try: + index_prefix = get_index_prefix(model) + self.create_index(index_name_model, index_prefix) + except Exception as e: + raise ValueError(str(e)) + # return 'rebuild success' + + def delete(self, ids) -> None: + pipe = self._client.pipeline() + for data_id in ids: + pipe.delete(f"{self.doc_prefix}{data_id}") + pipe.execute() + + def create(self, model=None): + index_name = get_index_name(model) + index_prefix = get_index_prefix(model) + return self.create_index(index_name, index_prefix) + + def get_index_by_name(self, index_name): + pass diff --git a/modelcache/processor/pre.py b/modelcache/processor/pre.py index 5875294..13bc8f4 100644 --- a/modelcache/processor/pre.py +++ b/modelcache/processor/pre.py @@ -108,3 +108,14 @@ def multi_analysis(dialog_str): # 输出结果 return result_list + + +def mm_insert_dict(data: Dict[str, Any], **_: Dict[str, Any]) -> Any: + print('chat_info: 
{}'.format(data.get("chat_info"))) + query_dict = data.get("chat_info")[-1]['query'] + return query_dict + + +def mm_query_dict(data: Dict[str, Any], **_: Dict[str, Any]) -> Any: + query_dict = data.get("query") + return query_dict diff --git a/multicache_serving.py b/multicache_serving.py index 23a8bac..1a0afde 100644 --- a/multicache_serving.py +++ b/multicache_serving.py @@ -15,21 +15,18 @@ from typing import Dict import time import json -import uuid -from gptcache import cache -from gptcache.adapter import codegpt -from gptcache.manager import CacheBase, VectorBase, get_data_manager -from gptcache.similarity_evaluation.distance import SearchDistanceEvaluation -from gptcache.processor.pre import insert_iat_dict -from gptcache.processor.pre import query_iat_dict -from gptcache.utils.env_config import get_table_suffix +import configparser +from modelcache import cache +# from modelcache.adapter import adapter +from modelcache.adapter_mm import adapter +# from modelcache.manager import CacheBase, VectorBase, get_data_manager +from modelcache.manager_mm import CacheBase, VectorBase, get_data_manager +from modelcache.similarity_evaluation.distance import SearchDistanceEvaluation +from modelcache.processor.pre import mm_insert_dict +from modelcache.processor.pre import mm_query_dict from concurrent.futures import ThreadPoolExecutor -from gptcache.maya_embedding_service.maya_multi_embedding_service import get_embedding_multi -from gptcache.maya_embedding_service.maya_multi_embedding_service import get_embedding_multi_concurrent_sin -# from gptcache.utils.modle_version_manager import model_version_load -# from gptcache.utils.modle_version_manager import get_all_collections -# from gptcache.utils.collection_util import get_collection_iat_name -# from gptcache.utils.collection_util import get_collection_iat_type +from modelcache.maya_embedding_service.maya_multi_embedding_service import get_embedding_multi +from modelcache.maya_embedding_service.maya_multi_embedding_service import get_embedding_multi_concurrent_sin def save_query_info(result, model, query, delta_time_log): @@ -55,43 +52,37 @@ def response_hitquery(cache_resp): # python类示例 class UserBackend: def __init__(self): - self.table_suffix = get_table_suffix() image_dimension = 768 text_dimension = 768 - # data_manager = get_data_manager(CacheBase("oceanbase", table_suffix=self.table_suffix), - # VectorBase("milvus", iat_dimension=image_dimension + text_dimension, - # i_dimension=image_dimension, t_dimension=text_dimension, - # table_suffix=self.table_suffix)) - data_manager = get_data_manager(CacheBase("oceanbase", table_suffix=self.table_suffix), - VectorBase("redis", iat_dimension=image_dimension+text_dimension, + mysql_config = configparser.ConfigParser() + mysql_config.read('modelcache/config/mysql_config.ini') + + # milvus_config = configparser.ConfigParser() + # milvus_config.read('modelcache/config/milvus_config.ini') + + redis_config = configparser.ConfigParser() + redis_config.read('modelcache/config/redis_config.ini') + + data_manager = get_data_manager(CacheBase("mysql", config=mysql_config), + VectorBase("redis", mm_dimension=image_dimension+text_dimension, i_dimension=image_dimension, t_dimension=text_dimension, - table_suffix=self.table_suffix)) + redis_config=redis_config)) cache.init( - # embedding_func=get_cache_embedding_text2vec, - # image_embedding_func=timm2vec.to_embeddings, - # text_embedding_func=text2vec.to_embeddings, embedding_func=get_embedding_multi, - embedding_concur_func=get_embedding_multi_concurrent_sin, 
+ embedding_concurrent_func=get_embedding_multi_concurrent_sin, data_manager=data_manager, similarity_evaluation=SearchDistanceEvaluation(), - # iat_query_pre_embedding_func=query_multi_splicing, - iat_insert_pre_embedding_func=insert_iat_dict, - iat_query_pre_embedding_func=query_iat_dict, - # insert_pre_embedding_miulti_func=insert_multimodal_splicing, - # query_pre_embedding_miulti_func=query_multimodal_splicing, + mm_insert_pre_embedding_func=mm_insert_dict, + mm_query_pre_embedding_func=mm_query_dict, ) - # cache.set_openai_key() self.gptcache_version = datetime.now().strftime("%Y-%m-%d %H:%M") self.executor = ThreadPoolExecutor(max_workers=6) def __call__(self, param): - print('gptcache_version: {}'.format(self.gptcache_version)) - # logging.info('gptcache_version: {}'.format(self.gptcache_version)) + print('mm_version: {}'.format(self.gptcache_version)) print('call_time: {}'.format(datetime.now().strftime("%Y-%m-%d %H:%M:%S"))) - # logging.info('call_time: {}'.format(datetime.now().strftime("%Y-%m-%d %H:%M"))) try: - # print('param: {}'.format(param)) param_dict = json.loads(param) except Exception as e: result = {"errorCode": 101, "errorDesc": str(e), "cacheHit": False, "delta_time": 0, "hit_query": '', @@ -128,10 +119,8 @@ def __call__(self, param): except Exception as e: result = {"errorCode": 103, "errorDesc": str(e), "cacheHit": False, "delta_time": 0, "hit_query": '', "answer": ''} - # cache.data_manager.save_query_resp(result, model='', query='', delta_time=0) return json.dumps(result) - # --------分割线 if request_type == 'iat_query': if UUID: try: @@ -143,7 +132,7 @@ def __call__(self, param): print('uuid_e: {}'.format(e)) try: start_time = time.time() - response = codegpt.ChatCompletion.create_iat_query( + response = adapter.ChatCompletion.create_iat_query( scope={"model": model}, query=query, ) @@ -173,12 +162,6 @@ def __call__(self, param): print('result: {}'.format(result)) return json.dumps(result, ensure_ascii=False) - # response = codegpt.ChatCompletion.create_iat_query( - # scope={"model": model}, - # query=query, - # ) - # print('response_query: {}'.format(response)) - if request_type == 'iat_insert': if UUID: try: @@ -191,7 +174,7 @@ def __call__(self, param): try: start_time = time.time() try: - response = codegpt.ChatCompletion.create_iat_insert( + response = adapter.ChatCompletion.create_iat_insert( model=model, chat_info=chat_info, ) @@ -211,19 +194,12 @@ def __call__(self, param): print('result: {}'.format(result)) return json.dumps(result, ensure_ascii=False) - # response = codegpt.ChatCompletion.create_iat_insert( - # model=model, - # chat_info=chat_info, - # milvus_collection_ins=collection_ins - # ) - # print('response: {}'.format(response)) - if request_type == 'iat_remove': remove_type = param_dict.get("remove_type") id_list = param_dict.get("id_list", []) print('remove_type: {}'.format(remove_type)) - response = codegpt.ChatCompletion.create_iat_remove( + response = adapter.ChatCompletion.create_iat_remove( model=model, remove_type=remove_type, id_list=id_list @@ -243,10 +219,9 @@ def __call__(self, param): if request_type == 'iat_register': iat_type = param_dict.get("iat_type") - response = codegpt.ChatCompletion.create_iat_register( + response = adapter.ChatCompletion.create_register( model=model, - iat_type=iat_type, - table_suffix=self.table_suffix + iat_type=iat_type ) if response in ['create_success', 'already_exists']: result = {"errorCode": 0, "errorDesc": "", "response": response, "writeStatus": "success"} @@ -286,15 +261,15 @@ def 
__health_check__(self): # r1 = json.dumps(data_dict) # ============02 - request_type = 'iat_query' - UUID = str(uuid.uuid1()) + "==>" + str(time.time()) - scope = {"model": "test_0313"} - img_data = 'http://resarch.oss-cn-hangzhou-zmf.aliyuncs.com/transFile%2Ftmp%2FLMM_test_image_coco%2FCOCO_train2014_000000332345.jpg' - query = {'text': ['父母带着孩子来这个地方可能会有什么顾虑'], - 'imageRaw': '', - 'imageUrl': img_data, - 'multiType': 'IMG_TEXT'} - r1 = json.dumps({'request_type': request_type, 'scope': scope, 'query': query, 'UUID': UUID}) + # request_type = 'iat_query' + # UUID = str(uuid.uuid1()) + "==>" + str(time.time()) + # scope = {"model": "test_0313"} + # img_data = 'http://resarch.oss-cn-hangzhou-zmf.aliyuncs.com/transFile%2Ftmp%2FLMM_test_image_coco%2FCOCO_train2014_000000332345.jpg' + # query = {'text': ['父母带着孩子来这个地方可能会有什么顾虑'], + # 'imageRaw': '', + # 'imageUrl': img_data, + # 'multiType': 'IMG_TEXT'} + # r1 = json.dumps({'request_type': request_type, 'scope': scope, 'query': query, 'UUID': UUID}) # ============03 # request_type = 'iat_remove' @@ -304,10 +279,10 @@ def __health_check__(self): # r1 = json.dumps({'request_type': request_type, 'scope': scope, 'remove_type': remove_type}) # ============04 - # request_type = 'iat_register' - # scope = {"model": "test_0313"} - # iat_type = 'IMG_TEXT' - # r1 = json.dumps({'request_type': request_type, 'scope': scope, 'iat_type': iat_type}) + request_type = 'iat_register' + scope = {"model": "test_0313"} + iat_type = 'IMG_TEXT' + r1 = json.dumps({'request_type': request_type, 'scope': scope, 'iat_type': iat_type}) user_backend = UserBackend() resp = user_backend(r1) From 1871eec5d595df2dc2c62084e95340d4b83315a3 Mon Sep 17 00:00:00 2001 From: fuhui Date: Tue, 16 Apr 2024 11:16:48 +0800 Subject: [PATCH 23/98] fix manage redis --- modelcache/manager/vector_data/redis.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/modelcache/manager/vector_data/redis.py b/modelcache/manager/vector_data/redis.py index a3f7faf..70fd7d3 100644 --- a/modelcache/manager/vector_data/redis.py +++ b/modelcache/manager/vector_data/redis.py @@ -22,22 +22,21 @@ def __init__( username: str = "", password: str = "", # dimension: int = 0, - mm_dimension: int = 0, + dimension: int = 0, i_dimension: int = 0, t_dimension: int = 0, top_k: int = 1, namespace: str = "", ): - if mm_dimension <= 0: + if dimension <= 0: raise ValueError( - f"invalid `dim` param: {mm_dimension} in the Redis vector store." + f"invalid `dim` param: {dimension} in the Redis vector store." 
) self._client = Redis( host=host, port=int(port), username=username, password=password ) self.top_k = top_k - # self.dimension = dimension - self.mm_dimension = mm_dimension + self.dimension = dimension self.i_dimension = i_dimension self.t_dimension = t_dimension self.namespace = namespace From e3eac54b7eceb24823ebdfd0c9182de0bcc0f1de Mon Sep 17 00:00:00 2001 From: fuhui Date: Tue, 16 Apr 2024 16:30:59 +0800 Subject: [PATCH 24/98] mm cache func develope --- modelcache/core.py | 6 ++++ modelcache/manager/vector_data/redis.py | 5 --- modelcache/manager_mm/vector_data/manager.py | 2 +- modelcache/manager_mm/vector_data/redis.py | 32 +++++++++++++++----- modelcache/utils/index_util.py | 24 +++++++++++++++ multicache_serving.py | 4 +++ 6 files changed, 59 insertions(+), 14 deletions(-) diff --git a/modelcache/core.py b/modelcache/core.py index 2f633f5..8cb1745 100644 --- a/modelcache/core.py +++ b/modelcache/core.py @@ -17,6 +17,8 @@ class Cache: def __init__(self): self.has_init = False self.cache_enable_func = None + self.query_pre_embedding_func = None + self.insert_pre_embedding_func = None self.mm_query_pre_embedding_func = None self.mm_insert_pre_embedding_func = None self.embedding_func = None @@ -31,6 +33,8 @@ def __init__(self): def init( self, cache_enable_func=cache_all, + query_pre_embedding_func=None, + insert_pre_embedding_func=None, mm_query_pre_embedding_func=None, mm_insert_pre_embedding_func=None, embedding_func=string_embedding, @@ -43,6 +47,8 @@ def init( ): self.has_init = True self.cache_enable_func = cache_enable_func + self.query_pre_embedding_func = query_pre_embedding_func + self.insert_pre_embedding_func = insert_pre_embedding_func self.mm_query_pre_embedding_func = mm_query_pre_embedding_func self.mm_insert_pre_embedding_func = mm_insert_pre_embedding_func self.embedding_func = embedding_func diff --git a/modelcache/manager/vector_data/redis.py b/modelcache/manager/vector_data/redis.py index 70fd7d3..b93893d 100644 --- a/modelcache/manager/vector_data/redis.py +++ b/modelcache/manager/vector_data/redis.py @@ -21,10 +21,7 @@ def __init__( port: str = "6379", username: str = "", password: str = "", - # dimension: int = 0, dimension: int = 0, - i_dimension: int = 0, - t_dimension: int = 0, top_k: int = 1, namespace: str = "", ): @@ -37,8 +34,6 @@ def __init__( ) self.top_k = top_k self.dimension = dimension - self.i_dimension = i_dimension - self.t_dimension = t_dimension self.namespace = namespace self.doc_prefix = f"{self.namespace}doc:" diff --git a/modelcache/manager_mm/vector_data/manager.py b/modelcache/manager_mm/vector_data/manager.py index 6049107..bb08247 100644 --- a/modelcache/manager_mm/vector_data/manager.py +++ b/modelcache/manager_mm/vector_data/manager.py @@ -69,7 +69,7 @@ def get(name, **kwargs): local_data=local_data ) elif name == "redis": - from modelcache.manager.vector_data.redis import RedisVectorStore + from modelcache.manager_mm.vector_data.redis import RedisVectorStore redis_config = kwargs.get("redis_config") mm_dimension = kwargs.get("mm_dimension", DIMENSION) diff --git a/modelcache/manager_mm/vector_data/redis.py b/modelcache/manager_mm/vector_data/redis.py index 8a8fb37..3474d8b 100644 --- a/modelcache/manager_mm/vector_data/redis.py +++ b/modelcache/manager_mm/vector_data/redis.py @@ -21,19 +21,23 @@ def __init__( port: str = "6379", username: str = "", password: str = "", - dimension: int = 0, + mm_dimension: int = 0, + i_dimension: int = 0, + t_dimension: int = 0, top_k: int = 1, namespace: str = "", ): - if dimension <= 0: + if 
mm_dimension <= 0: raise ValueError( - f"invalid `dim` param: {dimension} in the Milvus vector store." + f"invalid `dim` param: {mm_dimension} in the Milvus vector store." ) self._client = Redis( host=host, port=int(port), username=username, password=password ) self.top_k = top_k - self.dimension = dimension + self.mm_dimension = mm_dimension + self.i_dimension = i_dimension + self.t_dimension = t_dimension self.namespace = namespace self.doc_prefix = f"{self.namespace}doc:" @@ -47,8 +51,16 @@ def _check_index_exists(self, index_name: str) -> bool: modelcache_log.info("Index already exists") return True - def create_index(self, index_name, index_prefix): - dimension = self.dimension + def create_index(self, index_name, mm_type, index_prefix): + # dimension = self.dimension + if mm_type == 'IMG_TEXT': + dimension = self.mm_dimension + elif mm_type == 'IMG': + dimension = self.i_dimension + elif mm_type == 'TEXT': + dimension = self.t_dimension + else: + raise ValueError('dimension type exception') print('dimension: {}'.format(dimension)) if self._check_index_exists(index_name): modelcache_log.info( @@ -77,13 +89,17 @@ def create_index(self, index_name, index_prefix): ) return 'create_success' - def mul_add(self, datas: List[VectorData], model=None): - # pipe = self._client.pipeline() + def mul_add(self, datas: List[VectorData], model=None, mm_type=None): for data in datas: id: int = data.id embedding = data.data.astype(np.float32).tobytes() + + collection_name = get_collection_iat_name(model, mm_type) + index_prefix = get_collection_iat_prefix(model, mm_type) + id_field_name = "data_id" embedding_field_name = "data_vector" + obj = {id_field_name: id, embedding_field_name: embedding} index_prefix = get_index_prefix(model) self._client.hset(f"{index_prefix}{id}", mapping=obj) diff --git a/modelcache/utils/index_util.py b/modelcache/utils/index_util.py index be6e856..bbf0946 100644 --- a/modelcache/utils/index_util.py +++ b/modelcache/utils/index_util.py @@ -7,3 +7,27 @@ def get_index_name(model): def get_index_prefix(model): return 'prefix' + '_' + model + + +def get_mm_index_name(model, iat_type): + if iat_type not in ['IMG_TEXT', 'iat', 'IMG', 'image', 'TEXT', 'text']: + raise ValueError('iat_type is not normal!') + if iat_type == 'IMG_TEXT': + iat_type = 'iat' + elif iat_type == 'IMG': + iat_type = 'image' + elif iat_type == 'TEXT': + iat_type = 'text' + return 'multicache' + '_' + model + '_' + iat_type + + +def get_collection_iat_prefix(model, iat_type, table_suffix): + if iat_type not in ['IMG_TEXT', 'iat', 'IMG', 'image', 'TEXT', 'text']: + raise ValueError('iat_type is not normal!') + if iat_type == 'IMG_TEXT': + iat_type = 'iat' + elif iat_type == 'IMG': + iat_type = 'image' + elif iat_type == 'TEXT': + iat_type = 'text' + return 'prefix' + '_' + model + '_' + iat_type + '_' + table_suffix \ No newline at end of file diff --git a/multicache_serving.py b/multicache_serving.py index 1a0afde..ece9487 100644 --- a/multicache_serving.py +++ b/multicache_serving.py @@ -27,6 +27,8 @@ from concurrent.futures import ThreadPoolExecutor from modelcache.maya_embedding_service.maya_multi_embedding_service import get_embedding_multi from modelcache.maya_embedding_service.maya_multi_embedding_service import get_embedding_multi_concurrent_sin +from modelcache.processor.pre import query_multi_splicing +from modelcache.processor.pre import insert_multi_splicing def save_query_info(result, model, query, delta_time_log): @@ -73,6 +75,8 @@ def __init__(self): 
embedding_concurrent_func=get_embedding_multi_concurrent_sin, data_manager=data_manager, similarity_evaluation=SearchDistanceEvaluation(), + query_pre_embedding_func=query_multi_splicing, + insert_pre_embedding_func=insert_multi_splicing, mm_insert_pre_embedding_func=mm_insert_dict, mm_query_pre_embedding_func=mm_query_dict, ) From 9fce2e47a45b3f92ade2e399ee1f6eb8169b40b0 Mon Sep 17 00:00:00 2001 From: fuhui Date: Wed, 17 Apr 2024 10:35:50 +0800 Subject: [PATCH 25/98] multimodal insert ability develop --- flask4modelcache.py | 1 - modelcache/adapter_mm/adapter.py | 9 +- modelcache/adapter_mm/adapter_insert.py | 69 +++++++++++++-- modelcache/adapter_mm/adapter_register.py | 5 +- modelcache/manager_mm/data_manager.py | 86 +++++++++++-------- modelcache/manager_mm/factory.py | 27 +----- .../manager_mm/scalar_data/sql_storage.py | 19 ++-- modelcache/manager_mm/vector_data/base.py | 2 +- modelcache/manager_mm/vector_data/redis.py | 45 +++++----- modelcache/utils/index_util.py | 43 +++++----- multicache_serving.py | 85 +++++++++--------- 11 files changed, 219 insertions(+), 172 deletions(-) diff --git a/flask4modelcache.py b/flask4modelcache.py index 3473654..ee5955b 100644 --- a/flask4modelcache.py +++ b/flask4modelcache.py @@ -177,7 +177,6 @@ def user_backend(): return json.dumps(result) if request_type == 'register': - # iat_type = param_dict.get("iat_type") response = adapter.ChatCompletion.create_register( model=model ) diff --git a/modelcache/adapter_mm/adapter.py b/modelcache/adapter_mm/adapter.py index abde6fb..5f63098 100644 --- a/modelcache/adapter_mm/adapter.py +++ b/modelcache/adapter_mm/adapter.py @@ -3,8 +3,8 @@ from modelcache.adapter_mm.adapter_query import adapt_query from modelcache.adapter_mm.adapter_insert import adapt_insert -from modelcache.adapter.adapter_remove import adapt_remove -from modelcache.adapter.adapter_register import adapt_register +from modelcache.adapter_mm.adapter_remove import adapt_remove +from modelcache.adapter_mm.adapter_register import adapt_register class ChatCompletion(object): @@ -30,7 +30,8 @@ def create_mm_insert(cls, *args, **kwargs): **kwargs ) except Exception as e: - return str(e) + # return str(e) + raise e @classmethod def create_mm_remove(cls, *args, **kwargs): @@ -51,7 +52,7 @@ def create_mm_register(cls, *args, **kwargs): **kwargs ) except Exception as e: - return str(e) + raise e def construct_resp_from_cache(return_message, return_query): diff --git a/modelcache/adapter_mm/adapter_insert.py b/modelcache/adapter_mm/adapter_insert.py index 74aa619..8a06c92 100644 --- a/modelcache/adapter_mm/adapter_insert.py +++ b/modelcache/adapter_mm/adapter_insert.py @@ -1,4 +1,8 @@ # -*- coding: utf-8 -*- +import time +import requests +import base64 +import numpy as np from modelcache import cache from modelcache.utils.error import NotInitError from modelcache.utils.time import time_cal @@ -15,26 +19,77 @@ def adapt_insert(*args, **kwargs): cache_enable = chat_cache.cache_enable_func(*args, **kwargs) context = kwargs.pop("cache_context", {}) embedding_data = None - pre_embedding_data = chat_cache.insert_pre_embedding_func( + pre_embedding_data_dict = chat_cache.mm_insert_pre_embedding_func( kwargs, extra_param=context.get("pre_embedding_func", None), prompts=chat_cache.config.prompts, ) + + print('pre_embedding_data_dict: {}'.format(pre_embedding_data_dict)) chat_info = kwargs.pop("chat_info", []) llm_data = chat_info[-1]['answer'] + pre_embedding_text = '###'.join(pre_embedding_data_dict['text']) + pre_embedding_image_url = 
pre_embedding_data_dict['imageUrl'] + pre_embedding_image_raw = pre_embedding_data_dict['imageRaw'] + pre_embedding_image_id = pre_embedding_data_dict.get('imageId', None) + + if pre_embedding_image_url and pre_embedding_image_raw: + raise ValueError("Both pre_embedding_image_url and pre_embedding_image_raw cannot be non-empty at the same time.") + + if pre_embedding_image_url: + url_start_time = time.time() + response = requests.get(pre_embedding_image_url) + image_data = response.content + pre_embedding_image = base64.b64encode(image_data).decode('utf-8') + get_image_time = '{}s'.format(round(time.time() - url_start_time, 2)) + print('get_image_time: {}'.format(get_image_time)) + elif pre_embedding_image_raw: + pre_embedding_image = pre_embedding_image_raw + else: + pre_embedding_image = None + if not pre_embedding_text: + raise ValueError( + "Both pre_embedding_image_url and pre_embedding_image_raw are empty. Please provide at least one.") + + data_dict = {'text': [pre_embedding_text], 'image': pre_embedding_image} + embedding_data = None + mm_type = None + if cache_enable: - embedding_data = time_cal( + embedding_data_resp = time_cal( chat_cache.embedding_func, - func_name="embedding", + func_name="image_embedding", report_func=chat_cache.report.embedding, - )(pre_embedding_data) + )(data_dict) + + image_embeddings = embedding_data_resp['image_embedding'] + text_embeddings = embedding_data_resp['text_embeddings'] + + if len(image_embeddings) > 0 and len(image_embeddings) > 0: + image_embedding = np.array(image_embeddings[0]) + text_embedding = text_embeddings[0] + embedding_data = np.concatenate((image_embedding, text_embedding)) + mm_type = 'mm' + elif len(image_embeddings) > 0: + image_embedding = np.array(image_embeddings[0]) + embedding_data = image_embedding + mm_type = 'image' + elif len(text_embeddings) > 0: + text_embedding = np.array(text_embeddings[0]) + embedding_data = text_embedding + mm_type = 'text' + else: + raise ValueError('maya embedding service return both empty list, please check!') chat_cache.data_manager.save( - pre_embedding_data, + pre_embedding_text, + pre_embedding_image_url, + pre_embedding_image_id, llm_data, embedding_data, model=model, - extra_param=context.get("save_func", None) + mm_type=mm_type, + extra_param=context.get("mm_save_func", None) ) - return 'success' + return 'success' \ No newline at end of file diff --git a/modelcache/adapter_mm/adapter_register.py b/modelcache/adapter_mm/adapter_register.py index 53df128..fbec358 100644 --- a/modelcache/adapter_mm/adapter_register.py +++ b/modelcache/adapter_mm/adapter_register.py @@ -5,9 +5,12 @@ def adapt_register(*args, **kwargs): chat_cache = kwargs.pop("cache_obj", cache) model = kwargs.pop("model", None) + mm_type = kwargs.pop("mm_type", None) if model is None or len(model) == 0: return ValueError('') - register_resp = chat_cache.data_manager.create_index(model) + print('mm_type: {}'.format(mm_type)) + print('model: {}'.format(model)) + register_resp = chat_cache.data_manager.create_index(model, mm_type) print('register_resp: {}'.format(register_resp)) return register_resp diff --git a/modelcache/manager_mm/data_manager.py b/modelcache/manager_mm/data_manager.py index d6637ea..1397361 100644 --- a/modelcache/manager_mm/data_manager.py +++ b/modelcache/manager_mm/data_manager.py @@ -25,8 +25,12 @@ class DataManager(metaclass=ABCMeta): """DataManager manage the cache data, including save and search""" + # @abstractmethod + # def save(self, question, answer, embedding_data, **kwargs): + # pass + 
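+    # the multimodal save() below replaces the old signature: it takes the question text,
+    # an image reference (url and/or id) and the answer, plus a single embedding vector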
@abstractmethod - def save(self, question, answer, embedding_data, **kwargs): + def save(self, text, image_url, image_id, answer, embedding, **kwargs): pass @abstractmethod @@ -34,9 +38,8 @@ def save_query_resp(self, query_resp_dict, **kwargs): pass @abstractmethod - def import_data( - self, questions: List[Any], answers: List[Any], embedding_datas: List[Any], model:Any - ): + def import_data(self, texts: List[Any], image_urls: List[Any], image_ids: List[Any], answers: List[Answer], + embeddings: List[Any], model: Any, iat_type: Any): pass @abstractmethod @@ -89,21 +92,20 @@ def init(self): f"You don't have permission to access this file <{self.data_path}>." ) - def save(self, question, answer, embedding_data, **kwargs): - if isinstance(question, Question): - question = question.content - self.data[embedding_data] = (question, answer, embedding_data) + # def save(self, question, answer, embedding_data, **kwargs): + # if isinstance(question, Question): + # question = question.content + # self.data[embedding_data] = (question, answer, embedding_data) + + def save(self, text, image_url, image_id, answer, embedding, **kwargs): + pass def save_query_resp(self, query_resp_dict, **kwargs): pass - def import_data( - self, questions: List[Any], answers: List[Any], embedding_datas: List[Any], model: Any - ): - if len(questions) != len(answers) or len(questions) != len(embedding_datas): - raise ParamError("Make sure that all parameters have the same length") - for i, embedding_data in enumerate(embedding_datas): - self.data[embedding_data] = (questions[i], answers[i], embedding_datas[i]) + def import_data(self, texts: List[Any], image_urls: List[Any], image_ids: List[Any], answers: List[Answer], + embeddings: List[Any], model: Any, iat_type: Any): + pass def get_scalar_data(self, res_data, **kwargs) -> CacheData: return CacheData(question=res_data[0], answers=res_data[1]) @@ -158,9 +160,15 @@ def __init__( self.v = v self.o = o - def save(self, question, answer, embedding_data, **kwargs): + # def save(self, question, answer, embedding_data, **kwargs): + # model = kwargs.pop("model", None) + # self.import_data([question], [answer], [embedding_data], model) + + def save(self, text, image_url, image_id, answer, embedding, **kwargs): model = kwargs.pop("model", None) - self.import_data([question], [answer], [embedding_data], model) + mm_type = kwargs.pop("mm_type", None) + self.import_data([text], [image_url], [image_id], [answer], + [embedding], model, mm_type) def save_query_resp(self, query_resp_dict, **kwargs): save_query_start_time = time.time() @@ -190,36 +198,38 @@ def _process_question_data(self, question: Union[str, Question]): return Question(question) - def import_data( - self, questions: List[Any], answers: List[Answer], embedding_datas: List[Any], model: Any - ): - if len(questions) != len(answers) or len(questions) != len(embedding_datas): + def import_data(self, texts: List[Any], image_urls: List[Any], image_ids: List[Any], answers: List[Answer], + embeddings: List[Any], model: Any, iat_type: Any): + if len(texts) != len(answers): raise ParamError("Make sure that all parameters have the same length") cache_datas = [] - embedding_datas = [ - normalize(embedding_data) for embedding_data in embedding_datas + embeddings = [ + normalize(text_embedding) for text_embedding in embeddings ] - for i, embedding_data in enumerate(embedding_datas): + # print('embedding_datas: {}'.format(embedding_datas)) + for i, embedding in enumerate(embeddings): if self.o is not None: ans = 
self._process_answer_data(answers[i]) else: ans = answers[i] - - question = questions[i] - embedding_data = embedding_data.astype("float32") - cache_datas.append([ans, question, embedding_data, model]) - - ids = self.s.batch_insert(cache_datas) - logging.info('ids: {}'.format(ids)) - self.v.mul_add( + text = texts[i] + image_url = image_urls[i] + image_id = image_ids[i] + # iat_embedding = embedding.astype("float32") + cache_datas.append([ans, text, image_url, image_id, model]) + + # ids = self.s.batch_multimodal_insert(cache_datas) + ids = self.s.batch_iat_insert(cache_datas) + # self.v.multimodal_add( + self.v.iat_add( [ - VectorData(id=ids[i], data=embedding_data) - for i, embedding_data in enumerate(embedding_datas) + VectorData(id=ids[i], data=embedding) + for i, embedding in enumerate(embeddings) ], - model - + model, + iat_type ) def get_scalar_data(self, res_data, **kwargs) -> Optional[CacheData]: @@ -256,8 +266,8 @@ def delete(self, id_list, **kwargs): return {'status': 'success', 'milvus': 'delete_count: '+str(v_delete_count), 'mysql': 'delete_count: '+str(s_delete_count)} - def create_index(self, model, **kwargs): - return self.v.create(model) + def create_index(self, model, mm_type, **kwargs): + return self.v.create(model, mm_type) def truncate(self, model_name): # drop vector base data diff --git a/modelcache/manager_mm/factory.py b/modelcache/manager_mm/factory.py index 08baf6b..df920d2 100644 --- a/modelcache/manager_mm/factory.py +++ b/modelcache/manager_mm/factory.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- from typing import Union, Callable -from modelcache.manager import CacheBase, VectorBase, ObjectBase -from modelcache.manager.data_manager import SSDataManager, MapDataManager +from modelcache.manager_mm import CacheBase, VectorBase, ObjectBase +from modelcache.manager_mm.data_manager import SSDataManager, MapDataManager def get_data_manager( @@ -25,26 +25,3 @@ def get_data_manager( object_base = ObjectBase(name=object_base) assert cache_base and vector_base return SSDataManager(cache_base, vector_base, object_base, max_size, clean_size, eviction) - - -def get_data_manager_mm( - cache_base: Union[CacheBase, str] = None, - vector_base: Union[VectorBase, str] = None, - object_base: Union[ObjectBase, str] = None, - max_size: int = 1000, - clean_size: int = None, - eviction: str = "LRU", - data_path: str = "data_map.txt", - get_data_container: Callable = None, -): - if not cache_base and not vector_base: - return MapDataManager(data_path, max_size, get_data_container) - - if isinstance(cache_base, str): - cache_base = CacheBase(name=cache_base) - if isinstance(vector_base, str): - vector_base = VectorBase(name=vector_base) - if isinstance(object_base, str): - object_base = ObjectBase(name=object_base) - assert cache_base and vector_base - return SSDataManager(cache_base, vector_base, object_base, max_size, clean_size, eviction) diff --git a/modelcache/manager_mm/scalar_data/sql_storage.py b/modelcache/manager_mm/scalar_data/sql_storage.py index 503217a..e7f2b7a 100644 --- a/modelcache/manager_mm/scalar_data/sql_storage.py +++ b/modelcache/manager_mm/scalar_data/sql_storage.py @@ -36,25 +36,24 @@ def create(self): def _insert(self, data: List): answer = data[0] - question = data[1] - embedding_data = data[2] - model = data[3] + text = data[1] + image_url = data[2] + image_id = data[3] + model = data[4] answer_type = 0 - embedding_data = embedding_data.tobytes() - - table_name = "cache_codegpt_answer" - insert_sql = "INSERT INTO {} (question, answer, answer_type, model, 
embedding_data) VALUES (%s, %s, %s, %s, _binary%s)".format(table_name) + table_name = "multimodal_answer" + insert_sql = "INSERT INTO {} (question_text, image_url, image_id, answer, answer_type, model) VALUES (%s, %s, %s, %s, %s, %s)".format(table_name) conn = self.pool.connection() try: with conn.cursor() as cursor: - # 执行插入数据操作 - values = (question, answer, answer_type, model, embedding_data) + # data insert operation + values = (text, image_url, image_id, answer, answer_type, model) cursor.execute(insert_sql, values) conn.commit() id = cursor.lastrowid finally: - # 关闭连接,将连接返回给连接池 + # Close the connection and return it back to the connection pool conn.close() return id diff --git a/modelcache/manager_mm/vector_data/base.py b/modelcache/manager_mm/vector_data/base.py index 7bdf12a..1af12ef 100644 --- a/modelcache/manager_mm/vector_data/base.py +++ b/modelcache/manager_mm/vector_data/base.py @@ -31,7 +31,7 @@ def delete(self, ids) -> bool: pass @abstractmethod - def rebuild_col(self, model): + def rebuild_idx(self, model): pass def flush(self): diff --git a/modelcache/manager_mm/vector_data/redis.py b/modelcache/manager_mm/vector_data/redis.py index 3474d8b..fdfcdce 100644 --- a/modelcache/manager_mm/vector_data/redis.py +++ b/modelcache/manager_mm/vector_data/redis.py @@ -6,11 +6,11 @@ from redis.commands.search.field import TagField, VectorField, NumericField from redis.client import Redis -from modelcache.manager.vector_data.base import VectorBase, VectorData +from modelcache.manager_mm.vector_data.base import VectorBase, VectorData from modelcache.utils import import_redis from modelcache.utils.log import modelcache_log -from modelcache.utils.index_util import get_index_name -from modelcache.utils.index_util import get_index_prefix +from modelcache.utils.index_util import get_mm_index_name +from modelcache.utils.index_util import get_mm_index_prefix import_redis() @@ -94,18 +94,17 @@ def mul_add(self, datas: List[VectorData], model=None, mm_type=None): id: int = data.id embedding = data.data.astype(np.float32).tobytes() - collection_name = get_collection_iat_name(model, mm_type) - index_prefix = get_collection_iat_prefix(model, mm_type) + # collection_name = get_mm_index_name(model, mm_type) + index_prefix = get_mm_index_prefix(model, mm_type) id_field_name = "data_id" embedding_field_name = "data_vector" obj = {id_field_name: id, embedding_field_name: embedding} - index_prefix = get_index_prefix(model) self._client.hset(f"{index_prefix}{id}", mapping=obj) - def search(self, data: np.ndarray, top_k: int = -1, model=None): - index_name = get_index_name(model) + def search(self, data: np.ndarray, top_k: int = -1, model=None, mm_type=None): + index_name = get_mm_index_name(model, mm_type) id_field_name = "data_id" embedding_field_name = "data_vector" @@ -116,7 +115,6 @@ def search(self, data: np.ndarray, top_k: int = -1, model=None): .return_fields(id_field_name, "distance") .dialect(2) ) - query_params = {"vector": data.astype(np.float32).tobytes()} results = ( self._client.ft(index_name) @@ -128,19 +126,20 @@ def search(self, data: np.ndarray, top_k: int = -1, model=None): def rebuild(self, ids=None) -> bool: pass - def rebuild_col(self, model): - index_name_model = get_index_name(model) - if self._check_index_exists(index_name_model): + def rebuild_idx(self, model, mm_type=None): + for mm_type in ['IMG_TEXT', 'TEXT']: + index_name = get_mm_index_name(model, mm_type) + print('remove index_name: {}'.format(index_name)) + if self._check_index_exists(index_name): + try: + 
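
The _insert() method shown earlier in this patch unpacks its argument purely by position, so callers of batch_insert() must build each row in a fixed order. A minimal sketch of that layout, assuming a storage instance of the SQL storage class above (all values are placeholders):

    # Illustrative only: the positional row layout _insert()/batch_insert() expect,
    # matching data[0]..data[4] in the method above. All values are placeholders.
    cache_data = [
        "the cached answer",         # data[0] -> answer
        "the question text",         # data[1] -> text (question_text column)
        "http://example.com/x.jpg",  # data[2] -> image_url
        "img-001",                   # data[3] -> image_id
        "model_name",                # data[4] -> model
    ]
    new_id = storage._insert(cache_data)  # returns cursor.lastrowid from MySQL
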
self._client.ft(index_name).dropindex(delete_documents=True) + except Exception as e: + raise ValueError(str(e)) try: - self._client.ft(index_name_model).dropindex(delete_documents=True) + index_prefix = get_mm_index_prefix(model, mm_type) + self.create_index(index_name, mm_type, index_prefix) except Exception as e: raise ValueError(str(e)) - try: - index_prefix = get_index_prefix(model) - self.create_index(index_name_model, index_prefix) - except Exception as e: - raise ValueError(str(e)) - # return 'rebuild success' def delete(self, ids) -> None: pipe = self._client.pipeline() @@ -148,10 +147,10 @@ def delete(self, ids) -> None: pipe.delete(f"{self.doc_prefix}{data_id}") pipe.execute() - def create(self, model=None): - index_name = get_index_name(model) - index_prefix = get_index_prefix(model) - return self.create_index(index_name, index_prefix) + def create(self, model=None, mm_type=None): + index_name = get_mm_index_name(model, mm_type) + index_prefix = get_mm_index_prefix(model, mm_type) + return self.create_index(index_name, mm_type, index_prefix) def get_index_by_name(self, index_name): pass diff --git a/modelcache/utils/index_util.py b/modelcache/utils/index_util.py index bbf0946..b78d883 100644 --- a/modelcache/utils/index_util.py +++ b/modelcache/utils/index_util.py @@ -9,25 +9,26 @@ def get_index_prefix(model): return 'prefix' + '_' + model -def get_mm_index_name(model, iat_type): - if iat_type not in ['IMG_TEXT', 'iat', 'IMG', 'image', 'TEXT', 'text']: +def get_mm_index_name(model, mm_type): + print('mm_type: {}'.format(mm_type)) + if mm_type not in ['IMG_TEXT', 'mm', 'IMG', 'image', 'TEXT', 'text']: + raise ValueError('mm_type is not normal!') + if mm_type == 'IMG_TEXT': + mm_type = 'mm' + elif mm_type == 'IMG': + mm_type = 'image' + elif mm_type == 'TEXT': + mm_type = 'text' + return 'multicache' + '_' + model + '_' + mm_type + + +def get_mm_index_prefix(model, mm_type): + if mm_type not in ['IMG_TEXT', 'mm', 'IMG', 'image', 'TEXT', 'text']: raise ValueError('iat_type is not normal!') - if iat_type == 'IMG_TEXT': - iat_type = 'iat' - elif iat_type == 'IMG': - iat_type = 'image' - elif iat_type == 'TEXT': - iat_type = 'text' - return 'multicache' + '_' + model + '_' + iat_type - - -def get_collection_iat_prefix(model, iat_type, table_suffix): - if iat_type not in ['IMG_TEXT', 'iat', 'IMG', 'image', 'TEXT', 'text']: - raise ValueError('iat_type is not normal!') - if iat_type == 'IMG_TEXT': - iat_type = 'iat' - elif iat_type == 'IMG': - iat_type = 'image' - elif iat_type == 'TEXT': - iat_type = 'text' - return 'prefix' + '_' + model + '_' + iat_type + '_' + table_suffix \ No newline at end of file + if mm_type == 'IMG_TEXT': + mm_type = 'mm' + elif mm_type == 'IMG': + mm_type = 'image' + elif mm_type == 'TEXT': + mm_type = 'text' + return 'prefix' + '_' + model + '_' + mm_type diff --git a/multicache_serving.py b/multicache_serving.py index ece9487..77411e2 100644 --- a/multicache_serving.py +++ b/multicache_serving.py @@ -15,6 +15,7 @@ from typing import Dict import time import json +import uuid import configparser from modelcache import cache # from modelcache.adapter import adapter @@ -107,16 +108,16 @@ def __call__(self, param): model = model.replace('.', '_') print('model: {}'.format(model)) - if request_type in ['iat_query', 'iat_insert']: - if request_type == 'iat_query': + if request_type in ['mm_query', 'mm_insert']: + if request_type == 'mm_query': query = param_dict.get("query") - elif request_type == 'iat_insert': + elif request_type == 'mm_insert': chat_info = 
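
The index naming helpers in index_util.py above derive both the Redis index name and the key prefix from the model name and the multimodal type. A small sketch of the values they produce, assuming the model name test_0313 used in the serving examples (illustrative, not part of the patch):

    # Illustrative: expected outputs of the index_util helpers above,
    # assuming model='test_0313' as in the serving examples.
    from modelcache.utils.index_util import get_mm_index_name, get_mm_index_prefix

    index_name = get_mm_index_name('test_0313', 'IMG_TEXT')      # 'multicache_test_0313_mm'
    index_prefix = get_mm_index_prefix('test_0313', 'IMG_TEXT')  # 'prefix_test_0313_mm'
    text_index = get_mm_index_name('test_0313', 'TEXT')          # 'multicache_test_0313_text'
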
param_dict.get("chat_info") query = chat_info[-1]['query'] - if request_type is None or request_type not in ['iat_query', 'iat_remove', 'iat_insert', 'iat_register']: + if request_type is None or request_type not in ['mm_query', 'mm_remove', 'mm_insert', 'mm_register']: result = {"errorCode": 102, - "errorDesc": "type exception, should one of ['iat_query', 'iat_insert', 'iat_remove', 'iat_register']", + "errorDesc": "type exception, should one of ['mm_query', 'mm_insert', 'mm_remove', 'mm_register']", "cacheHit": False, "delta_time": 0, "hit_query": '', "answer": ''} cache.data_manager.save_query_resp(result, model=model, query='', delta_time=0) return json.dumps(result) @@ -125,7 +126,7 @@ def __call__(self, param): "answer": ''} return json.dumps(result) - if request_type == 'iat_query': + if request_type == 'mm_query': if UUID: try: uuid_list = UUID.split('==>') @@ -136,7 +137,7 @@ def __call__(self, param): print('uuid_e: {}'.format(e)) try: start_time = time.time() - response = adapter.ChatCompletion.create_iat_query( + response = adapter.ChatCompletion.create_mm_query( scope={"model": model}, query=query, ) @@ -166,7 +167,7 @@ def __call__(self, param): print('result: {}'.format(result)) return json.dumps(result, ensure_ascii=False) - if request_type == 'iat_insert': + if request_type == 'mm_insert': if UUID: try: uuid_list = UUID.split('==>') @@ -178,13 +179,14 @@ def __call__(self, param): try: start_time = time.time() try: - response = adapter.ChatCompletion.create_iat_insert( + response = adapter.ChatCompletion.create_mm_insert( model=model, chat_info=chat_info, ) except Exception as e: - result = {"errorCode": 303, "errorDesc": e, "writeStatus": "exception"} - return json.dumps(result, ensure_ascii=False) + # result = {"errorCode": 303, "errorDesc": str(e), "writeStatus": "exception"} + # return json.dumps(result, ensure_ascii=False) + raise e if response == 'success': result = {"errorCode": 0, "errorDesc": "", "writeStatus": "success"} @@ -194,16 +196,17 @@ def __call__(self, param): print('insert_time: {}'.format(insert_time)) return json.dumps(result, ensure_ascii=False) except Exception as e: - result = {"errorCode": 304, "errorDesc": e, "writeStatus": "exception"} - print('result: {}'.format(result)) - return json.dumps(result, ensure_ascii=False) + # result = {"errorCode": 304, "errorDesc": str(e), "writeStatus": "exception"} + # print('result: {}'.format(result)) + # return json.dumps(result, ensure_ascii=False) + raise e - if request_type == 'iat_remove': + if request_type == 'mm_remove': remove_type = param_dict.get("remove_type") id_list = param_dict.get("id_list", []) print('remove_type: {}'.format(remove_type)) - response = adapter.ChatCompletion.create_iat_remove( + response = adapter.ChatCompletion.create_mm_remove( model=model, remove_type=remove_type, id_list=id_list @@ -221,11 +224,11 @@ def __call__(self, param): result = {"errorCode": 402, "errorDesc": "", "response": response, "writeStatus": "exception"} return json.dumps(result) - if request_type == 'iat_register': - iat_type = param_dict.get("iat_type") - response = adapter.ChatCompletion.create_register( + if request_type == 'mm_register': + mm_type = param_dict.get("mm_type") + response = adapter.ChatCompletion.create_mm_register( model=model, - iat_type=iat_type + mm_type=mm_type ) if response in ['create_success', 'already_exists']: result = {"errorCode": 0, "errorDesc": "", "response": response, "writeStatus": "success"} @@ -249,23 +252,23 @@ def __health_check__(self): if __name__ == '__main__': # 
============01 - # request_type = 'iat_insert' - # scope = {"model": "test_0313"} - # # UUID = "820b0052-d9d8-11ee-95f1-52775e3e6fd1" + "==>" + str(time.time()) - # UUID = str(uuid.uuid1()) + "==>" + str(time.time()) - # print('UUID: {}'.format(UUID)) - # img_data = "http://resarch.oss-cn-hangzhou-zmf.aliyuncs.com/transFile%2Ftmp%2FLMM_test_image_coco%2FCOCO_train2014_000000332345.jpg" - # query = {'text': ['父母带着孩子来这个地方可能会有什么顾虑'], - # 'imageRaw': '', - # 'imageUrl': img_data, - # 'imageId': 'ccc'} - # answer = "应该注意小孩不要跑到铁轨上" - # chat_info = [{"query": query, "answer": answer}] - # data_dict = {'request_type': request_type, 'scope': scope, 'chat_info': chat_info, 'UUID': UUID} - # r1 = json.dumps(data_dict) + request_type = 'mm_insert' + scope = {"model": "test_0313"} + # UUID = "820b0052-d9d8-11ee-95f1-52775e3e6fd1" + "==>" + str(time.time()) + UUID = str(uuid.uuid1()) + "==>" + str(time.time()) + print('UUID: {}'.format(UUID)) + img_data = "http://resarch.oss-cn-hangzhou-zmf.aliyuncs.com/transFile%2Ftmp%2FLMM_test_image_coco%2FCOCO_train2014_000000332345.jpg" + query = {'text': ['父母带着孩子来这个地方可能会有什么顾虑'], + 'imageRaw': '', + 'imageUrl': img_data, + 'imageId': 'ccc'} + answer = "应该注意小孩不要跑到铁轨上" + chat_info = [{"query": query, "answer": answer}] + data_dict = {'request_type': request_type, 'scope': scope, 'chat_info': chat_info, 'UUID': UUID} + r1 = json.dumps(data_dict) # ============02 - # request_type = 'iat_query' + # request_type = 'mm_query' # UUID = str(uuid.uuid1()) + "==>" + str(time.time()) # scope = {"model": "test_0313"} # img_data = 'http://resarch.oss-cn-hangzhou-zmf.aliyuncs.com/transFile%2Ftmp%2FLMM_test_image_coco%2FCOCO_train2014_000000332345.jpg' @@ -276,17 +279,17 @@ def __health_check__(self): # r1 = json.dumps({'request_type': request_type, 'scope': scope, 'query': query, 'UUID': UUID}) # ============03 - # request_type = 'iat_remove' + # request_type = 'mm_remove' # scope = {"model": "test_0313"} - # # iat_type = 'IMG_TEXT' + # # mm_type = 'IMG_TEXT' # remove_type = 'truncate_by_model' # r1 = json.dumps({'request_type': request_type, 'scope': scope, 'remove_type': remove_type}) # ============04 - request_type = 'iat_register' - scope = {"model": "test_0313"} - iat_type = 'IMG_TEXT' - r1 = json.dumps({'request_type': request_type, 'scope': scope, 'iat_type': iat_type}) + # request_type = 'mm_register' + # scope = {"model": "test_0313"} + # mm_type = 'IMG_TEXT' + # r1 = json.dumps({'request_type': request_type, 'scope': scope, 'mm_type': mm_type}) user_backend = UserBackend() resp = user_backend(r1) From 7139dad9009d160873c8b9cad8f795dcb1e33826 Mon Sep 17 00:00:00 2001 From: fuhui Date: Wed, 17 Apr 2024 10:45:47 +0800 Subject: [PATCH 26/98] add redis vector store --- modelcache/manager_mm/data_manager.py | 2 +- modelcache/manager_mm/vector_data/redis.py | 16 +++++++++++++--- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/modelcache/manager_mm/data_manager.py b/modelcache/manager_mm/data_manager.py index 1397361..b7c8a1e 100644 --- a/modelcache/manager_mm/data_manager.py +++ b/modelcache/manager_mm/data_manager.py @@ -221,7 +221,7 @@ def import_data(self, texts: List[Any], image_urls: List[Any], image_ids: List[A cache_datas.append([ans, text, image_url, image_id, model]) # ids = self.s.batch_multimodal_insert(cache_datas) - ids = self.s.batch_iat_insert(cache_datas) + ids = self.s.batch_insert(cache_datas) # self.v.multimodal_add( self.v.iat_add( [ diff --git a/modelcache/manager_mm/vector_data/redis.py b/modelcache/manager_mm/vector_data/redis.py index 
fdfcdce..2e3cce1 100644 --- a/modelcache/manager_mm/vector_data/redis.py +++ b/modelcache/manager_mm/vector_data/redis.py @@ -89,19 +89,29 @@ def create_index(self, index_name, mm_type, index_prefix): ) return 'create_success' - def mul_add(self, datas: List[VectorData], model=None, mm_type=None): + def add(self, datas: List[VectorData], model=None, mm_type=None): + # pipe = self._client.pipeline() for data in datas: id: int = data.id embedding = data.data.astype(np.float32).tobytes() - - # collection_name = get_mm_index_name(model, mm_type) + # obj = { + # "vector": data.data.astype(np.float32).tobytes(), + # } + # collection_name = self.collection_prefix + '_' + model + '_' + self.table_suffix + # collection_name = get_collection_iat_name(model, iat_type, self.table_suffix) index_prefix = get_mm_index_prefix(model, mm_type) + # print('collection_name: {}'.format(collection_name)) + # id_field_name = collection_name + '_' + "id" + # embedding_field_name = collection_name + '_' + "vec" id_field_name = "data_id" embedding_field_name = "data_vector" obj = {id_field_name: id, embedding_field_name: embedding} + # print('obj: {}'.format(obj)) self._client.hset(f"{index_prefix}{id}", mapping=obj) + # pipe.hset(f"{self.doc_prefix}{key}", mapping=obj) + # pipe.execute() def search(self, data: np.ndarray, top_k: int = -1, model=None, mm_type=None): index_name = get_mm_index_name(model, mm_type) From 69477fae6f77f727cba10bfe79f97bd084fdc0a1 Mon Sep 17 00:00:00 2001 From: fuhui Date: Wed, 17 Apr 2024 11:02:32 +0800 Subject: [PATCH 27/98] multimodal cache modify --- modelcache/manager/scalar_data/sql_storage.py | 2 ++ modelcache/manager_mm/__init__.py | 5 ----- modelcache_mm/__init__.py | 12 ++++++++++++ .../adapter_mm/__init__.py | 0 .../adapter_mm/adapter.py | 8 ++++---- .../adapter_mm/adapter_insert.py | 4 ++++ .../adapter_mm/adapter_query.py | 0 .../adapter_mm/adapter_register.py | 0 .../adapter_mm/adapter_remove.py | 0 modelcache_mm/manager_mm/__init__.py | 1 + .../manager_mm/data_manager.py | 4 +--- .../manager_mm/data_manager_mm.py | 0 .../manager_mm/eviction/__init__.py | 0 .../manager_mm/eviction/base.py | 0 .../manager_mm/eviction/manager.py | 0 .../manager_mm/eviction/memory_cache.py | 0 .../manager_mm/eviction_manager.py | 0 .../manager_mm/factory.py | 4 ++-- .../manager_mm/object_data/__init__.py | 0 .../manager_mm/object_data/base.py | 0 .../manager_mm/scalar_data/__init__.py | 0 .../manager_mm/scalar_data/base.py | 0 .../manager_mm/scalar_data/manager.py | 0 .../manager_mm/scalar_data/sql_storage.py | 0 .../scalar_data/sql_storage_sqlite.py | 0 .../manager_mm/vector_data/__init__.py | 0 .../manager_mm/vector_data/base.py | 2 +- .../manager_mm/vector_data/faiss.py | 0 .../manager_mm/vector_data/manager.py | 2 +- .../manager_mm/vector_data/milvus.py | 0 .../manager_mm/vector_data/redis.py | 18 ++---------------- multicache_serving.py | 4 ++-- 32 files changed, 32 insertions(+), 34 deletions(-) delete mode 100644 modelcache/manager_mm/__init__.py create mode 100644 modelcache_mm/__init__.py rename {modelcache => modelcache_mm}/adapter_mm/__init__.py (100%) rename {modelcache => modelcache_mm}/adapter_mm/adapter.py (84%) rename {modelcache => modelcache_mm}/adapter_mm/adapter_insert.py (95%) rename {modelcache => modelcache_mm}/adapter_mm/adapter_query.py (100%) rename {modelcache => modelcache_mm}/adapter_mm/adapter_register.py (100%) rename {modelcache => modelcache_mm}/adapter_mm/adapter_remove.py (100%) create mode 100644 modelcache_mm/manager_mm/__init__.py rename {modelcache => 
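
The add() method in the Redis store above writes one hash per cached entry, keyed by the per-model prefix plus the scalar-store id, with the embedding serialized as float32 bytes. A rough sketch of what ends up in Redis (connection details, the id, and the vector dimension are placeholder assumptions):

    # Illustrative sketch of the hash layout written by the add() method above.
    # Host/port, the id, and the 1536-dim vector are placeholder assumptions.
    import numpy as np
    from redis import Redis

    client = Redis(host='localhost', port=6379)
    index_prefix = 'prefix_test_0313_mm'   # get_mm_index_prefix('test_0313', 'IMG_TEXT')
    data_id = 1001
    embedding = np.random.rand(1536).astype(np.float32)

    client.hset(f"{index_prefix}{data_id}",
                mapping={"data_id": data_id, "data_vector": embedding.tobytes()})
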
modelcache_mm}/manager_mm/data_manager.py (98%) rename {modelcache => modelcache_mm}/manager_mm/data_manager_mm.py (100%) rename {modelcache => modelcache_mm}/manager_mm/eviction/__init__.py (100%) rename {modelcache => modelcache_mm}/manager_mm/eviction/base.py (100%) rename {modelcache => modelcache_mm}/manager_mm/eviction/manager.py (100%) rename {modelcache => modelcache_mm}/manager_mm/eviction/memory_cache.py (100%) rename {modelcache => modelcache_mm}/manager_mm/eviction_manager.py (100%) rename {modelcache => modelcache_mm}/manager_mm/factory.py (85%) rename {modelcache => modelcache_mm}/manager_mm/object_data/__init__.py (100%) rename {modelcache => modelcache_mm}/manager_mm/object_data/base.py (100%) rename {modelcache => modelcache_mm}/manager_mm/scalar_data/__init__.py (100%) rename {modelcache => modelcache_mm}/manager_mm/scalar_data/base.py (100%) rename {modelcache => modelcache_mm}/manager_mm/scalar_data/manager.py (100%) rename {modelcache => modelcache_mm}/manager_mm/scalar_data/sql_storage.py (100%) rename {modelcache => modelcache_mm}/manager_mm/scalar_data/sql_storage_sqlite.py (100%) rename {modelcache => modelcache_mm}/manager_mm/vector_data/__init__.py (100%) rename {modelcache => modelcache_mm}/manager_mm/vector_data/base.py (91%) rename {modelcache => modelcache_mm}/manager_mm/vector_data/faiss.py (100%) rename {modelcache => modelcache_mm}/manager_mm/vector_data/manager.py (98%) rename {modelcache => modelcache_mm}/manager_mm/vector_data/milvus.py (100%) rename {modelcache => modelcache_mm}/manager_mm/vector_data/redis.py (87%) diff --git a/modelcache/manager/scalar_data/sql_storage.py b/modelcache/manager/scalar_data/sql_storage.py index 503217a..e44b069 100644 --- a/modelcache/manager/scalar_data/sql_storage.py +++ b/modelcache/manager/scalar_data/sql_storage.py @@ -40,6 +40,7 @@ def _insert(self, data: List): embedding_data = data[2] model = data[3] answer_type = 0 + print('embedding_data: {}'.format(embedding_data)) embedding_data = embedding_data.tobytes() table_name = "cache_codegpt_answer" @@ -61,6 +62,7 @@ def _insert(self, data: List): def batch_insert(self, all_data: List[CacheData]): ids = [] for data in all_data: + print('data: {}'.format(data)) ids.append(self._insert(data)) return ids diff --git a/modelcache/manager_mm/__init__.py b/modelcache/manager_mm/__init__.py deleted file mode 100644 index 4bfc597..0000000 --- a/modelcache/manager_mm/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# -*- coding: utf-8 -*- -from modelcache.manager_mm.scalar_data import CacheBase -from modelcache.manager_mm.vector_data import VectorBase -from modelcache.manager_mm.object_data import ObjectBase -from modelcache.manager_mm.factory import get_data_manager diff --git a/modelcache_mm/__init__.py b/modelcache_mm/__init__.py new file mode 100644 index 0000000..73db86c --- /dev/null +++ b/modelcache_mm/__init__.py @@ -0,0 +1,12 @@ +# -*- coding: utf-8 -*- +""" + Alipay.com Inc. + Copyright (c) 2004-2023 All Rights Reserved. 
+ ------------------------------------------------------ + File Name : __init__.py.py + Author : fuhui.phe + Create Time : 2024/4/17 10:53 + Description : description what the main function of this file + Change Activity: + version0 : 2024/4/17 10:53 by fuhui.phe init +""" diff --git a/modelcache/adapter_mm/__init__.py b/modelcache_mm/adapter_mm/__init__.py similarity index 100% rename from modelcache/adapter_mm/__init__.py rename to modelcache_mm/adapter_mm/__init__.py diff --git a/modelcache/adapter_mm/adapter.py b/modelcache_mm/adapter_mm/adapter.py similarity index 84% rename from modelcache/adapter_mm/adapter.py rename to modelcache_mm/adapter_mm/adapter.py index 5f63098..edbd32e 100644 --- a/modelcache/adapter_mm/adapter.py +++ b/modelcache_mm/adapter_mm/adapter.py @@ -1,10 +1,10 @@ # -*- coding: utf-8 -*- import logging -from modelcache.adapter_mm.adapter_query import adapt_query -from modelcache.adapter_mm.adapter_insert import adapt_insert -from modelcache.adapter_mm.adapter_remove import adapt_remove -from modelcache.adapter_mm.adapter_register import adapt_register +from modelcache_mm.adapter_mm.adapter_query import adapt_query +from modelcache_mm.adapter_mm.adapter_insert import adapt_insert +from modelcache_mm.adapter_mm.adapter_remove import adapt_remove +from modelcache_mm.adapter_mm.adapter_register import adapt_register class ChatCompletion(object): diff --git a/modelcache/adapter_mm/adapter_insert.py b/modelcache_mm/adapter_mm/adapter_insert.py similarity index 95% rename from modelcache/adapter_mm/adapter_insert.py rename to modelcache_mm/adapter_mm/adapter_insert.py index 8a06c92..511e70e 100644 --- a/modelcache/adapter_mm/adapter_insert.py +++ b/modelcache_mm/adapter_mm/adapter_insert.py @@ -66,6 +66,9 @@ def adapt_insert(*args, **kwargs): image_embeddings = embedding_data_resp['image_embedding'] text_embeddings = embedding_data_resp['text_embeddings'] + print('image_embeddings: {}'.format(image_embeddings)) + print('text_embeddings: {}'.format(text_embeddings)) + if len(image_embeddings) > 0 and len(image_embeddings) > 0: image_embedding = np.array(image_embeddings[0]) text_embedding = text_embeddings[0] @@ -82,6 +85,7 @@ def adapt_insert(*args, **kwargs): else: raise ValueError('maya embedding service return both empty list, please check!') + print('embedding_data: {}'.format(embedding_data)) chat_cache.data_manager.save( pre_embedding_text, pre_embedding_image_url, diff --git a/modelcache/adapter_mm/adapter_query.py b/modelcache_mm/adapter_mm/adapter_query.py similarity index 100% rename from modelcache/adapter_mm/adapter_query.py rename to modelcache_mm/adapter_mm/adapter_query.py diff --git a/modelcache/adapter_mm/adapter_register.py b/modelcache_mm/adapter_mm/adapter_register.py similarity index 100% rename from modelcache/adapter_mm/adapter_register.py rename to modelcache_mm/adapter_mm/adapter_register.py diff --git a/modelcache/adapter_mm/adapter_remove.py b/modelcache_mm/adapter_mm/adapter_remove.py similarity index 100% rename from modelcache/adapter_mm/adapter_remove.py rename to modelcache_mm/adapter_mm/adapter_remove.py diff --git a/modelcache_mm/manager_mm/__init__.py b/modelcache_mm/manager_mm/__init__.py new file mode 100644 index 0000000..40a96af --- /dev/null +++ b/modelcache_mm/manager_mm/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/modelcache/manager_mm/data_manager.py b/modelcache_mm/manager_mm/data_manager.py similarity index 98% rename from modelcache/manager_mm/data_manager.py rename to 
modelcache_mm/manager_mm/data_manager.py index b7c8a1e..2627446 100644 --- a/modelcache/manager_mm/data_manager.py +++ b/modelcache_mm/manager_mm/data_manager.py @@ -220,10 +220,8 @@ def import_data(self, texts: List[Any], image_urls: List[Any], image_ids: List[A # iat_embedding = embedding.astype("float32") cache_datas.append([ans, text, image_url, image_id, model]) - # ids = self.s.batch_multimodal_insert(cache_datas) ids = self.s.batch_insert(cache_datas) - # self.v.multimodal_add( - self.v.iat_add( + self.v.add( [ VectorData(id=ids[i], data=embedding) for i, embedding in enumerate(embeddings) diff --git a/modelcache/manager_mm/data_manager_mm.py b/modelcache_mm/manager_mm/data_manager_mm.py similarity index 100% rename from modelcache/manager_mm/data_manager_mm.py rename to modelcache_mm/manager_mm/data_manager_mm.py diff --git a/modelcache/manager_mm/eviction/__init__.py b/modelcache_mm/manager_mm/eviction/__init__.py similarity index 100% rename from modelcache/manager_mm/eviction/__init__.py rename to modelcache_mm/manager_mm/eviction/__init__.py diff --git a/modelcache/manager_mm/eviction/base.py b/modelcache_mm/manager_mm/eviction/base.py similarity index 100% rename from modelcache/manager_mm/eviction/base.py rename to modelcache_mm/manager_mm/eviction/base.py diff --git a/modelcache/manager_mm/eviction/manager.py b/modelcache_mm/manager_mm/eviction/manager.py similarity index 100% rename from modelcache/manager_mm/eviction/manager.py rename to modelcache_mm/manager_mm/eviction/manager.py diff --git a/modelcache/manager_mm/eviction/memory_cache.py b/modelcache_mm/manager_mm/eviction/memory_cache.py similarity index 100% rename from modelcache/manager_mm/eviction/memory_cache.py rename to modelcache_mm/manager_mm/eviction/memory_cache.py diff --git a/modelcache/manager_mm/eviction_manager.py b/modelcache_mm/manager_mm/eviction_manager.py similarity index 100% rename from modelcache/manager_mm/eviction_manager.py rename to modelcache_mm/manager_mm/eviction_manager.py diff --git a/modelcache/manager_mm/factory.py b/modelcache_mm/manager_mm/factory.py similarity index 85% rename from modelcache/manager_mm/factory.py rename to modelcache_mm/manager_mm/factory.py index df920d2..6d834d6 100644 --- a/modelcache/manager_mm/factory.py +++ b/modelcache_mm/manager_mm/factory.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- from typing import Union, Callable -from modelcache.manager_mm import CacheBase, VectorBase, ObjectBase -from modelcache.manager_mm.data_manager import SSDataManager, MapDataManager +from modelcache_mm.manager_mm import CacheBase, VectorBase, ObjectBase +from modelcache_mm.manager_mm.data_manager import SSDataManager, MapDataManager def get_data_manager( diff --git a/modelcache/manager_mm/object_data/__init__.py b/modelcache_mm/manager_mm/object_data/__init__.py similarity index 100% rename from modelcache/manager_mm/object_data/__init__.py rename to modelcache_mm/manager_mm/object_data/__init__.py diff --git a/modelcache/manager_mm/object_data/base.py b/modelcache_mm/manager_mm/object_data/base.py similarity index 100% rename from modelcache/manager_mm/object_data/base.py rename to modelcache_mm/manager_mm/object_data/base.py diff --git a/modelcache/manager_mm/scalar_data/__init__.py b/modelcache_mm/manager_mm/scalar_data/__init__.py similarity index 100% rename from modelcache/manager_mm/scalar_data/__init__.py rename to modelcache_mm/manager_mm/scalar_data/__init__.py diff --git a/modelcache/manager_mm/scalar_data/base.py b/modelcache_mm/manager_mm/scalar_data/base.py 
similarity index 100% rename from modelcache/manager_mm/scalar_data/base.py rename to modelcache_mm/manager_mm/scalar_data/base.py diff --git a/modelcache/manager_mm/scalar_data/manager.py b/modelcache_mm/manager_mm/scalar_data/manager.py similarity index 100% rename from modelcache/manager_mm/scalar_data/manager.py rename to modelcache_mm/manager_mm/scalar_data/manager.py diff --git a/modelcache/manager_mm/scalar_data/sql_storage.py b/modelcache_mm/manager_mm/scalar_data/sql_storage.py similarity index 100% rename from modelcache/manager_mm/scalar_data/sql_storage.py rename to modelcache_mm/manager_mm/scalar_data/sql_storage.py diff --git a/modelcache/manager_mm/scalar_data/sql_storage_sqlite.py b/modelcache_mm/manager_mm/scalar_data/sql_storage_sqlite.py similarity index 100% rename from modelcache/manager_mm/scalar_data/sql_storage_sqlite.py rename to modelcache_mm/manager_mm/scalar_data/sql_storage_sqlite.py diff --git a/modelcache/manager_mm/vector_data/__init__.py b/modelcache_mm/manager_mm/vector_data/__init__.py similarity index 100% rename from modelcache/manager_mm/vector_data/__init__.py rename to modelcache_mm/manager_mm/vector_data/__init__.py diff --git a/modelcache/manager_mm/vector_data/base.py b/modelcache_mm/manager_mm/vector_data/base.py similarity index 91% rename from modelcache/manager_mm/vector_data/base.py rename to modelcache_mm/manager_mm/vector_data/base.py index 1af12ef..b9692f3 100644 --- a/modelcache/manager_mm/vector_data/base.py +++ b/modelcache_mm/manager_mm/vector_data/base.py @@ -15,7 +15,7 @@ class VectorBase(ABC): """VectorBase: base vector store interface""" @abstractmethod - def mul_add(self, datas: List[VectorData], model=None): + def add(self, datas: List[VectorData], model=None): pass @abstractmethod diff --git a/modelcache/manager_mm/vector_data/faiss.py b/modelcache_mm/manager_mm/vector_data/faiss.py similarity index 100% rename from modelcache/manager_mm/vector_data/faiss.py rename to modelcache_mm/manager_mm/vector_data/faiss.py diff --git a/modelcache/manager_mm/vector_data/manager.py b/modelcache_mm/manager_mm/vector_data/manager.py similarity index 98% rename from modelcache/manager_mm/vector_data/manager.py rename to modelcache_mm/manager_mm/vector_data/manager.py index bb08247..c399a88 100644 --- a/modelcache/manager_mm/vector_data/manager.py +++ b/modelcache_mm/manager_mm/vector_data/manager.py @@ -69,7 +69,7 @@ def get(name, **kwargs): local_data=local_data ) elif name == "redis": - from modelcache.manager_mm.vector_data.redis import RedisVectorStore + from modelcache_mm.manager_mm.vector_data.redis import RedisVectorStore redis_config = kwargs.get("redis_config") mm_dimension = kwargs.get("mm_dimension", DIMENSION) diff --git a/modelcache/manager_mm/vector_data/milvus.py b/modelcache_mm/manager_mm/vector_data/milvus.py similarity index 100% rename from modelcache/manager_mm/vector_data/milvus.py rename to modelcache_mm/manager_mm/vector_data/milvus.py diff --git a/modelcache/manager_mm/vector_data/redis.py b/modelcache_mm/manager_mm/vector_data/redis.py similarity index 87% rename from modelcache/manager_mm/vector_data/redis.py rename to modelcache_mm/manager_mm/vector_data/redis.py index 2e3cce1..d712406 100644 --- a/modelcache/manager_mm/vector_data/redis.py +++ b/modelcache_mm/manager_mm/vector_data/redis.py @@ -3,10 +3,10 @@ import numpy as np from redis.commands.search.indexDefinition import IndexDefinition, IndexType from redis.commands.search.query import Query -from redis.commands.search.field import TagField, VectorField, 
NumericField +from redis.commands.search.field import VectorField, NumericField from redis.client import Redis -from modelcache.manager_mm.vector_data.base import VectorBase, VectorData +from modelcache_mm.manager_mm.vector_data.base import VectorBase, VectorData from modelcache.utils import import_redis from modelcache.utils.log import modelcache_log from modelcache.utils.index_util import get_mm_index_name @@ -90,28 +90,14 @@ def create_index(self, index_name, mm_type, index_prefix): return 'create_success' def add(self, datas: List[VectorData], model=None, mm_type=None): - # pipe = self._client.pipeline() for data in datas: id: int = data.id embedding = data.data.astype(np.float32).tobytes() - # obj = { - # "vector": data.data.astype(np.float32).tobytes(), - # } - # collection_name = self.collection_prefix + '_' + model + '_' + self.table_suffix - # collection_name = get_collection_iat_name(model, iat_type, self.table_suffix) index_prefix = get_mm_index_prefix(model, mm_type) - # print('collection_name: {}'.format(collection_name)) - - # id_field_name = collection_name + '_' + "id" - # embedding_field_name = collection_name + '_' + "vec" id_field_name = "data_id" embedding_field_name = "data_vector" - obj = {id_field_name: id, embedding_field_name: embedding} - # print('obj: {}'.format(obj)) self._client.hset(f"{index_prefix}{id}", mapping=obj) - # pipe.hset(f"{self.doc_prefix}{key}", mapping=obj) - # pipe.execute() def search(self, data: np.ndarray, top_k: int = -1, model=None, mm_type=None): index_name = get_mm_index_name(model, mm_type) diff --git a/multicache_serving.py b/multicache_serving.py index 77411e2..096d576 100644 --- a/multicache_serving.py +++ b/multicache_serving.py @@ -19,9 +19,9 @@ import configparser from modelcache import cache # from modelcache.adapter import adapter -from modelcache.adapter_mm import adapter +from modelcache_mm.adapter_mm import adapter # from modelcache.manager import CacheBase, VectorBase, get_data_manager -from modelcache.manager_mm import CacheBase, VectorBase, get_data_manager +from modelcache_mm.manager_mm import CacheBase, VectorBase, get_data_manager from modelcache.similarity_evaluation.distance import SearchDistanceEvaluation from modelcache.processor.pre import mm_insert_dict from modelcache.processor.pre import mm_query_dict From c7bedff2a8fa4d8d0a199501ac0578ab4d7ede16 Mon Sep 17 00:00:00 2001 From: fuhui Date: Wed, 17 Apr 2024 11:05:44 +0800 Subject: [PATCH 28/98] add config init file --- .gitignore | 4 ++-- modelcache_mm/config/milvus_config.ini | 5 +++++ modelcache_mm/config/mysql_config.ini | 6 ++++++ modelcache_mm/config/redis_config.ini | 5 +++++ 4 files changed, 18 insertions(+), 2 deletions(-) create mode 100644 modelcache_mm/config/milvus_config.ini create mode 100644 modelcache_mm/config/mysql_config.ini create mode 100644 modelcache_mm/config/redis_config.ini diff --git a/.gitignore b/.gitignore index 929c3ce..5eb23d8 100644 --- a/.gitignore +++ b/.gitignore @@ -136,6 +136,6 @@ dmypy.json /flask_server *.bin **/modelcache_serving.py -*.ini +**/maya_embedding_service -**/maya_embedding_service \ No newline at end of file +#*.ini \ No newline at end of file diff --git a/modelcache_mm/config/milvus_config.ini b/modelcache_mm/config/milvus_config.ini new file mode 100644 index 0000000..f5bd532 --- /dev/null +++ b/modelcache_mm/config/milvus_config.ini @@ -0,0 +1,5 @@ +[milvus] +host = '' +port = '' +user = '' +password = '' \ No newline at end of file diff --git a/modelcache_mm/config/mysql_config.ini 
b/modelcache_mm/config/mysql_config.ini new file mode 100644 index 0000000..2c63f0e --- /dev/null +++ b/modelcache_mm/config/mysql_config.ini @@ -0,0 +1,6 @@ +[mysql] +host = '' +port = '' +username = '' +password = '' +database = '' diff --git a/modelcache_mm/config/redis_config.ini b/modelcache_mm/config/redis_config.ini new file mode 100644 index 0000000..a1cdb3d --- /dev/null +++ b/modelcache_mm/config/redis_config.ini @@ -0,0 +1,5 @@ +[redis] +host = '' +port = '' +user = '' +password = '' From 62be8253a57ca0b39df7ca88895a443c53e5cb50 Mon Sep 17 00:00:00 2001 From: fuhui Date: Thu, 18 Apr 2024 10:45:05 +0800 Subject: [PATCH 29/98] add modelcache mm ability --- .gitignore | 2 +- modelcache/manager/scalar_data/base.py | 2 +- modelcache_mm/__init__.py | 15 +- .../{adapter_mm => adapter}/__init__.py | 0 .../{adapter_mm => adapter}/adapter.py | 23 +- .../{adapter_mm => adapter}/adapter_insert.py | 9 +- modelcache_mm/adapter/adapter_query.py | 239 ++++++++++++++ .../adapter_register.py | 8 +- .../{adapter_mm => adapter}/adapter_remove.py | 11 +- modelcache_mm/adapter_mm/adapter_query.py | 148 --------- modelcache_mm/config.py | 22 ++ modelcache_mm/core.py | 76 +++++ modelcache_mm/embedding/__init__.py | 32 ++ modelcache_mm/embedding/base.py | 17 + modelcache_mm/embedding/data2vec.py | 108 +++++++ modelcache_mm/embedding/fasttext.py | 27 ++ modelcache_mm/embedding/huggingface.py | 64 ++++ modelcache_mm/embedding/llmEmb.py | 38 +++ modelcache_mm/embedding/onnx.py | 70 +++++ modelcache_mm/embedding/paddlenlp.py | 60 ++++ modelcache_mm/embedding/string.py | 5 + modelcache_mm/embedding/timm.py | 72 +++++ modelcache_mm/manager/__init__.py | 5 + .../{manager_mm => manager}/data_manager.py | 124 ++++++-- .../eviction/__init__.py | 0 .../{manager_mm => manager}/eviction/base.py | 0 .../eviction/manager.py | 0 .../eviction/memory_cache.py | 0 .../eviction_manager.py | 0 .../{manager_mm => manager}/factory.py | 5 +- .../object_data/__init__.py | 0 .../object_data/base.py | 0 .../scalar_data/__init__.py | 4 +- .../scalar_data/base.py | 0 .../scalar_data/manager.py | 8 +- .../scalar_data/sql_storage.py | 63 +++- .../scalar_data/sql_storage_sqlite.py | 0 .../vector_data/__init__.py | 4 +- .../vector_data/base.py | 12 +- .../vector_data/faiss.py | 0 .../vector_data/manager.py | 4 +- .../vector_data/milvus.py | 0 .../vector_data/redis.py | 36 ++- modelcache_mm/manager_mm/data_manager_mm.py | 291 ------------------ .../{manager_mm => processor}/__init__.py | 0 modelcache_mm/processor/post.py | 15 + modelcache_mm/processor/pre.py | 121 ++++++++ modelcache_mm/report.py | 44 +++ .../similarity_evaluation/__init__.py | 11 + .../similarity_evaluation/distance.py | 24 ++ .../similarity_evaluation/exact_match.py | 17 + .../similarity_evaluation.py | 15 + modelcache_mm/utils/__init__.py | 75 +++++ modelcache_mm/utils/cache_func.py | 3 + modelcache_mm/utils/dependency_control.py | 18 ++ modelcache_mm/utils/env_config.py | 1 + modelcache_mm/utils/error.py | 35 +++ modelcache_mm/utils/index_util.py | 35 +++ modelcache_mm/utils/lazy_import.py | 28 ++ modelcache_mm/utils/log.py | 7 + modelcache_mm/utils/model_filter.py | 15 + modelcache_mm/utils/time.py | 20 ++ multicache_serving.py | 112 ++++--- 63 files changed, 1593 insertions(+), 607 deletions(-) rename modelcache_mm/{adapter_mm => adapter}/__init__.py (100%) rename modelcache_mm/{adapter_mm => adapter}/adapter.py (67%) rename modelcache_mm/{adapter_mm => adapter}/adapter_insert.py (94%) create mode 100644 modelcache_mm/adapter/adapter_query.py rename 
modelcache_mm/{adapter_mm => adapter}/adapter_register.py (76%) rename modelcache_mm/{adapter_mm => adapter}/adapter_remove.py (75%) delete mode 100644 modelcache_mm/adapter_mm/adapter_query.py create mode 100644 modelcache_mm/config.py create mode 100644 modelcache_mm/core.py create mode 100644 modelcache_mm/embedding/__init__.py create mode 100644 modelcache_mm/embedding/base.py create mode 100644 modelcache_mm/embedding/data2vec.py create mode 100644 modelcache_mm/embedding/fasttext.py create mode 100644 modelcache_mm/embedding/huggingface.py create mode 100644 modelcache_mm/embedding/llmEmb.py create mode 100644 modelcache_mm/embedding/onnx.py create mode 100644 modelcache_mm/embedding/paddlenlp.py create mode 100644 modelcache_mm/embedding/string.py create mode 100644 modelcache_mm/embedding/timm.py create mode 100644 modelcache_mm/manager/__init__.py rename modelcache_mm/{manager_mm => manager}/data_manager.py (67%) rename modelcache_mm/{manager_mm => manager}/eviction/__init__.py (100%) rename modelcache_mm/{manager_mm => manager}/eviction/base.py (100%) rename modelcache_mm/{manager_mm => manager}/eviction/manager.py (100%) rename modelcache_mm/{manager_mm => manager}/eviction/memory_cache.py (100%) rename modelcache_mm/{manager_mm => manager}/eviction_manager.py (100%) rename modelcache_mm/{manager_mm => manager}/factory.py (85%) rename modelcache_mm/{manager_mm => manager}/object_data/__init__.py (100%) rename modelcache_mm/{manager_mm => manager}/object_data/base.py (100%) rename modelcache_mm/{manager_mm => manager}/scalar_data/__init__.py (53%) rename modelcache_mm/{manager_mm => manager}/scalar_data/base.py (100%) rename modelcache_mm/{manager_mm => manager}/scalar_data/manager.py (74%) rename modelcache_mm/{manager_mm => manager}/scalar_data/sql_storage.py (70%) rename modelcache_mm/{manager_mm => manager}/scalar_data/sql_storage_sqlite.py (100%) rename modelcache_mm/{manager_mm => manager}/vector_data/__init__.py (58%) rename modelcache_mm/{manager_mm => manager}/vector_data/base.py (66%) rename modelcache_mm/{manager_mm => manager}/vector_data/faiss.py (100%) rename modelcache_mm/{manager_mm => manager}/vector_data/manager.py (97%) rename modelcache_mm/{manager_mm => manager}/vector_data/milvus.py (100%) rename modelcache_mm/{manager_mm => manager}/vector_data/redis.py (84%) delete mode 100644 modelcache_mm/manager_mm/data_manager_mm.py rename modelcache_mm/{manager_mm => processor}/__init__.py (100%) create mode 100644 modelcache_mm/processor/post.py create mode 100644 modelcache_mm/processor/pre.py create mode 100644 modelcache_mm/report.py create mode 100644 modelcache_mm/similarity_evaluation/__init__.py create mode 100644 modelcache_mm/similarity_evaluation/distance.py create mode 100644 modelcache_mm/similarity_evaluation/exact_match.py create mode 100644 modelcache_mm/similarity_evaluation/similarity_evaluation.py create mode 100644 modelcache_mm/utils/__init__.py create mode 100644 modelcache_mm/utils/cache_func.py create mode 100644 modelcache_mm/utils/dependency_control.py create mode 100644 modelcache_mm/utils/env_config.py create mode 100644 modelcache_mm/utils/error.py create mode 100644 modelcache_mm/utils/index_util.py create mode 100644 modelcache_mm/utils/lazy_import.py create mode 100644 modelcache_mm/utils/log.py create mode 100644 modelcache_mm/utils/model_filter.py create mode 100644 modelcache_mm/utils/time.py diff --git a/.gitignore b/.gitignore index 5eb23d8..ac8448b 100644 --- a/.gitignore +++ b/.gitignore @@ -138,4 +138,4 @@ dmypy.json 
**/modelcache_serving.py **/maya_embedding_service -#*.ini \ No newline at end of file +*.ini \ No newline at end of file diff --git a/modelcache/manager/scalar_data/base.py b/modelcache/manager/scalar_data/base.py index fd8bb50..f080491 100644 --- a/modelcache/manager/scalar_data/base.py +++ b/modelcache/manager/scalar_data/base.py @@ -95,7 +95,7 @@ def create(self): pass @abstractmethod - def batch_insert(self, all_data: List[CacheData]): + def batch_iat_insert(self, all_data: List[CacheData]): pass @abstractmethod diff --git a/modelcache_mm/__init__.py b/modelcache_mm/__init__.py index 73db86c..17c93fb 100644 --- a/modelcache_mm/__init__.py +++ b/modelcache_mm/__init__.py @@ -1,12 +1,5 @@ # -*- coding: utf-8 -*- -""" - Alipay.com Inc. - Copyright (c) 2004-2023 All Rights Reserved. - ------------------------------------------------------ - File Name : __init__.py.py - Author : fuhui.phe - Create Time : 2024/4/17 10:53 - Description : description what the main function of this file - Change Activity: - version0 : 2024/4/17 10:53 by fuhui.phe init -""" +from modelcache_mm.core import Cache +from modelcache_mm.core import cache +from modelcache_mm.config import Config +import modelcache_mm.adapter diff --git a/modelcache_mm/adapter_mm/__init__.py b/modelcache_mm/adapter/__init__.py similarity index 100% rename from modelcache_mm/adapter_mm/__init__.py rename to modelcache_mm/adapter/__init__.py diff --git a/modelcache_mm/adapter_mm/adapter.py b/modelcache_mm/adapter/adapter.py similarity index 67% rename from modelcache_mm/adapter_mm/adapter.py rename to modelcache_mm/adapter/adapter.py index edbd32e..722a8db 100644 --- a/modelcache_mm/adapter_mm/adapter.py +++ b/modelcache_mm/adapter/adapter.py @@ -1,16 +1,16 @@ # -*- coding: utf-8 -*- import logging -from modelcache_mm.adapter_mm.adapter_query import adapt_query -from modelcache_mm.adapter_mm.adapter_insert import adapt_insert -from modelcache_mm.adapter_mm.adapter_remove import adapt_remove -from modelcache_mm.adapter_mm.adapter_register import adapt_register +from modelcache_mm.adapter.adapter_query import adapt_query +from modelcache_mm.adapter.adapter_insert import adapt_insert +from modelcache_mm.adapter.adapter_remove import adapt_remove +from modelcache_mm.adapter.adapter_register import adapt_register class ChatCompletion(object): """Openai ChatCompletion Wrapper""" @classmethod - def create_mm_query(cls, *args, **kwargs): + def create_query(cls, *args, **kwargs): def cache_data_convert(cache_data, cache_query): return construct_resp_from_cache(cache_data, cache_query) try: @@ -20,10 +20,10 @@ def cache_data_convert(cache_data, cache_query): **kwargs ) except Exception as e: - return str(e) - + # return str(e) + raise e @classmethod - def create_mm_insert(cls, *args, **kwargs): + def create_insert(cls, *args, **kwargs): try: return adapt_insert( *args, @@ -34,18 +34,17 @@ def create_mm_insert(cls, *args, **kwargs): raise e @classmethod - def create_mm_remove(cls, *args, **kwargs): + def create_remove(cls, *args, **kwargs): try: return adapt_remove( *args, **kwargs ) except Exception as e: - logging.info('adapt_remove_e: {}'.format(e)) - return str(e) + raise e @classmethod - def create_mm_register(cls, *args, **kwargs): + def create_register(cls, *args, **kwargs): try: return adapt_register( *args, diff --git a/modelcache_mm/adapter_mm/adapter_insert.py b/modelcache_mm/adapter/adapter_insert.py similarity index 94% rename from modelcache_mm/adapter_mm/adapter_insert.py rename to modelcache_mm/adapter/adapter_insert.py index 
511e70e..2d94798 100644 --- a/modelcache_mm/adapter_mm/adapter_insert.py +++ b/modelcache_mm/adapter/adapter_insert.py @@ -3,9 +3,9 @@ import requests import base64 import numpy as np -from modelcache import cache -from modelcache.utils.error import NotInitError -from modelcache.utils.time import time_cal +from modelcache_mm import cache +from modelcache_mm.utils.error import NotInitError +from modelcache_mm.utils.time import time_cal def adapt_insert(*args, **kwargs): @@ -19,7 +19,7 @@ def adapt_insert(*args, **kwargs): cache_enable = chat_cache.cache_enable_func(*args, **kwargs) context = kwargs.pop("cache_context", {}) embedding_data = None - pre_embedding_data_dict = chat_cache.mm_insert_pre_embedding_func( + pre_embedding_data_dict = chat_cache.insert_pre_embedding_func( kwargs, extra_param=context.get("pre_embedding_func", None), prompts=chat_cache.config.prompts, @@ -84,7 +84,6 @@ def adapt_insert(*args, **kwargs): mm_type = 'text' else: raise ValueError('maya embedding service return both empty list, please check!') - print('embedding_data: {}'.format(embedding_data)) chat_cache.data_manager.save( pre_embedding_text, diff --git a/modelcache_mm/adapter/adapter_query.py b/modelcache_mm/adapter/adapter_query.py new file mode 100644 index 0000000..88a52c5 --- /dev/null +++ b/modelcache_mm/adapter/adapter_query.py @@ -0,0 +1,239 @@ +# -*- coding: utf-8 -*- +import time +import requests +import numpy as np +import base64 +from modelcache_mm import cache +from modelcache_mm.utils.error import NotInitError +from modelcache_mm.utils.error import MultiTypeError +from modelcache_mm.utils.time import time_cal + + +def adapt_query(cache_data_convert, *args, **kwargs): + chat_cache = kwargs.pop("cache_obj", cache) + scope = kwargs.pop("scope", None) + model = scope['model'] + if not chat_cache.has_init: + raise NotInitError() + + cache_enable = chat_cache.cache_enable_func(*args, **kwargs) + context = kwargs.pop("cache_context", {}) + cache_factor = kwargs.pop("cache_factor", 1.0) + + pre_embedding_data_dict = chat_cache.query_pre_embedding_func( + kwargs, + extra_param=context.get("pre_embedding_func", None), + prompts=chat_cache.config.prompts, + ) + + pre_embedding_text = '###'.join(pre_embedding_data_dict['text']) + pre_embedding_image_raw = pre_embedding_data_dict['imageRaw'] + pre_embedding_image_url = pre_embedding_data_dict['imageUrl'] + pre_multi_type = pre_embedding_data_dict['multiType'] + # print('pre_embedding_image_url: {}'.format(pre_embedding_image_url)) + # print('pre_embedding_text: {}'.format(pre_embedding_text)) + + # 判断逻辑 + if pre_multi_type == 'IMG_TEXT': + if pre_embedding_image_raw and pre_embedding_image_url: + raise ValueError( + "Both pre_embedding_imageUrl and pre_embedding_imageRaw cannot be non-empty at the same time.") + if pre_embedding_image_url: + url_start_time = time.time() + response = requests.get(pre_embedding_image_url) + image_data = response.content + pre_embedding_image = base64.b64encode(image_data).decode('utf-8') + get_image_time = '{}s'.format(round(time.time() - url_start_time, 2)) + print('get_image_time: {}'.format(get_image_time)) + elif pre_embedding_image_raw: + pre_embedding_image = pre_embedding_image_raw + else: + raise ValueError( + "Both pre_embedding_imageUrl and pre_embedding_imageRaw are empty. 
Please provide at least one.") + data_dict = {'text': [pre_embedding_text], 'image': pre_embedding_image} + # print('data_dict: {}'.format(data_dict)) + elif pre_multi_type == 'TEXT': + data_dict = {'text': [pre_embedding_text], 'image': None} + else: + raise MultiTypeError + # print('data_dict: {}'.format(data_dict)) + + embedding_data = None + mm_type = None + if cache_enable: + if pre_multi_type == 'IMG_TEXT': + embedding_data_resp = time_cal( + chat_cache.embedding_concurrent_func, + func_name="iat_embedding", + report_func=chat_cache.report.embedding, + )(data_dict) + else: + embedding_data_resp = time_cal( + chat_cache.embedding_func, + func_name="iat_embedding", + report_func=chat_cache.report.embedding, + )(data_dict) + image_embeddings = embedding_data_resp['image_embedding'] + text_embeddings = embedding_data_resp['text_embeddings'] + + if len(image_embeddings) > 0 and len(image_embeddings) > 0: + image_embedding = np.array(image_embeddings[0]) + text_embedding = np.array(text_embeddings[0]) + embedding_data = np.concatenate((image_embedding, text_embedding)) + mm_type = 'mm' + elif len(image_embeddings) > 0: + image_embedding = np.array(image_embeddings[0]) + embedding_data = image_embedding + mm_type = 'image' + elif len(text_embeddings) > 0: + text_embedding = np.array(text_embeddings[0]) + embedding_data = text_embedding + mm_type = 'text' + else: + raise ValueError('maya embedding service return both empty list, please check!') + + if cache_enable: + cache_data_list = time_cal( + chat_cache.data_manager.search, + func_name="vector_search", + report_func=chat_cache.report.search, + )( + embedding_data, + extra_param=context.get("search_func", None), + top_k=kwargs.pop("top_k", -1), + model=model, + mm_type=pre_multi_type, + ) + + cache_answers = [] + cache_questions = [] + cache_image_urls = [] + cache_image_ids = [] + cache_ids = [] + similarity_threshold = chat_cache.config.similarity_threshold + similarity_threshold_long = chat_cache.config.similarity_threshold_long + + min_rank, max_rank = chat_cache.similarity_evaluation.range() + rank_threshold = (max_rank - min_rank) * similarity_threshold * cache_factor + rank_threshold_long = (max_rank - min_rank) * similarity_threshold_long * cache_factor + rank_threshold = ( + max_rank + if rank_threshold > max_rank + else min_rank + if rank_threshold < min_rank + else rank_threshold + ) + rank_threshold_long = ( + max_rank + if rank_threshold_long > max_rank + else min_rank + if rank_threshold_long < min_rank + else rank_threshold_long + ) + + if cache_data_list is None or len(cache_data_list) == 0: + rank_pre = -1.0 + else: + cache_data_dict = {'search_result': cache_data_list[0]} + rank_pre = chat_cache.similarity_evaluation.evaluation( + None, + cache_data_dict, + extra_param=context.get("evaluation_func", None), + ) + + print('rank_pre: {}'.format(rank_pre)) + print('rank_threshold: {}'.format(rank_threshold)) + if rank_pre < rank_threshold: + return + + for cache_data in cache_data_list: + print('cache_data: {}'.format(cache_data)) + primary_id = cache_data[1] + ret = chat_cache.data_manager.get_scalar_data( + cache_data, extra_param=context.get("get_scalar_data", None) + ) + if ret is None: + continue + + if "deps" in context and hasattr(ret.question, "deps"): + eval_query_data = { + "question": context["deps"][0]["data"], + "embedding": None + } + eval_cache_data = { + "question": ret.question.deps[0].data, + "answer": ret.answers[0].answer, + "search_result": cache_data, + "embedding": None, + } + else: + eval_query_data = 
{ + "question": pre_embedding_text, + "embedding": embedding_data, + } + + eval_cache_data = { + "question": ret[0], + "image_url": ret[1], + "image_raw": ret[2], + "answer": ret[3], + "search_result": cache_data, + "embedding": None + } + rank = chat_cache.similarity_evaluation.evaluation( + eval_query_data, + eval_cache_data, + extra_param=context.get("evaluation_func", None), + ) + print('rank_threshold: {}'.format(rank_threshold)) + print('rank_threshold_long: {}'.format(rank_threshold_long)) + print('rank: {}'.format(rank)) + + if len(pre_embedding_text) <= 50: + if rank_threshold <= rank: + cache_answers.append((rank, ret[3])) + cache_image_urls.append((rank, ret[1])) + cache_image_ids.append((rank, ret[2])) + cache_questions.append((rank, ret[0])) + cache_ids.append((rank, primary_id)) + else: + if rank_threshold_long <= rank: + cache_answers.append((rank, ret[3])) + cache_image_urls.append((rank, ret[1])) + cache_image_ids.append((rank, ret[2])) + cache_questions.append((rank, ret[0])) + cache_ids.append((rank, primary_id)) + + cache_answers = sorted(cache_answers, key=lambda x: x[0], reverse=True) + cache_image_urls = sorted(cache_image_urls, key=lambda x: x[0], reverse=True) + cache_image_ids = sorted(cache_image_ids, key=lambda x: x[0], reverse=True) + cache_questions = sorted(cache_questions, key=lambda x: x[0], reverse=True) + cache_ids = sorted(cache_ids, key=lambda x: x[0], reverse=True) + + print('cache_answers: {}'.format(cache_answers)) + + if len(cache_answers) != 0: + return_message = chat_cache.post_process_messages_func( + [t[1] for t in cache_answers] + ) + return_image_url = chat_cache.post_process_messages_func( + [t[1] for t in cache_image_urls] + ) + return_image_id = chat_cache.post_process_messages_func( + [t[1] for t in cache_image_ids] + ) + return_query = chat_cache.post_process_messages_func( + [t[1] for t in cache_questions] + ) + return_id = chat_cache.post_process_messages_func( + [t[1] for t in cache_ids] + ) + # 更新命中次数 + try: + chat_cache.data_manager.update_hit_count(return_id) + except Exception: + print('update_hit_count except, please check!') + + chat_cache.report.hint_cache() + return_query_dict = {"image_url": return_image_url, "image_id": return_image_id, "question": return_query} + return cache_data_convert(return_message, return_query_dict) diff --git a/modelcache_mm/adapter_mm/adapter_register.py b/modelcache_mm/adapter/adapter_register.py similarity index 76% rename from modelcache_mm/adapter_mm/adapter_register.py rename to modelcache_mm/adapter/adapter_register.py index fbec358..934d9bb 100644 --- a/modelcache_mm/adapter_mm/adapter_register.py +++ b/modelcache_mm/adapter/adapter_register.py @@ -1,16 +1,16 @@ # -*- coding: utf-8 -*- -from modelcache import cache +from modelcache_mm import cache def adapt_register(*args, **kwargs): chat_cache = kwargs.pop("cache_obj", cache) model = kwargs.pop("model", None) - mm_type = kwargs.pop("mm_type", None) + type = kwargs.pop("type", None) if model is None or len(model) == 0: return ValueError('') - print('mm_type: {}'.format(mm_type)) + print('type: {}'.format(type)) print('model: {}'.format(model)) - register_resp = chat_cache.data_manager.create_index(model, mm_type) + register_resp = chat_cache.data_manager.create_index(model, type) print('register_resp: {}'.format(register_resp)) return register_resp diff --git a/modelcache_mm/adapter_mm/adapter_remove.py b/modelcache_mm/adapter/adapter_remove.py similarity index 75% rename from modelcache_mm/adapter_mm/adapter_remove.py rename to 
modelcache_mm/adapter/adapter_remove.py index 25f1ba3..f6e26fa 100644 --- a/modelcache_mm/adapter_mm/adapter_remove.py +++ b/modelcache_mm/adapter/adapter_remove.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -from modelcache import cache -from modelcache.utils.error import NotInitError, RemoveError +from modelcache_mm import cache +from modelcache_mm.utils.error import NotInitError def adapt_remove(*args, **kwargs): @@ -16,11 +16,10 @@ def adapt_remove(*args, **kwargs): # delete data if remove_type == 'delete_by_id': id_list = kwargs.pop("id_list", []) + print('id_list: {}'.format(id_list)) resp = chat_cache.data_manager.delete(id_list, model=model) elif remove_type == 'truncate_by_model': - resp = chat_cache.data_manager.truncate(model) + resp = chat_cache.data_manager.truncate_iat(model) else: - # resp = "remove_type_error" - raise RemoveError() + resp = "remove_type_error" return resp - diff --git a/modelcache_mm/adapter_mm/adapter_query.py b/modelcache_mm/adapter_mm/adapter_query.py deleted file mode 100644 index 934c644..0000000 --- a/modelcache_mm/adapter_mm/adapter_query.py +++ /dev/null @@ -1,148 +0,0 @@ -# -*- coding: utf-8 -*- -import logging -import time -from modelcache import cache -from modelcache.utils.error import NotInitError -from modelcache.utils.time import time_cal -from modelcache.processor.pre import multi_analysis - - -def adapt_query(cache_data_convert, *args, **kwargs): - chat_cache = kwargs.pop("cache_obj", cache) - scope = kwargs.pop("scope", None) - model = scope['model'] - if not chat_cache.has_init: - raise NotInitError() - cache_enable = chat_cache.cache_enable_func(*args, **kwargs) - context = kwargs.pop("cache_context", {}) - embedding_data = None - cache_factor = kwargs.pop("cache_factor", 1.0) - pre_embedding_data = chat_cache.query_pre_embedding_func( - kwargs, - extra_param=context.get("pre_embedding_func", None), - prompts=chat_cache.config.prompts, - ) - - if cache_enable: - embedding_data = time_cal( - chat_cache.embedding_func, - func_name="embedding", - report_func=chat_cache.report.embedding, - )(pre_embedding_data) - - if cache_enable: - cache_data_list = time_cal( - chat_cache.data_manager.search, - func_name="milvus_search", - report_func=chat_cache.report.search, - )( - embedding_data, - extra_param=context.get("search_func", None), - top_k=kwargs.pop("top_k", -1), - model=model - ) - cache_answers = [] - cache_questions = [] - cache_ids = [] - similarity_threshold = chat_cache.config.similarity_threshold - similarity_threshold_long = chat_cache.config.similarity_threshold_long - - min_rank, max_rank = chat_cache.similarity_evaluation.range() - rank_threshold = (max_rank - min_rank) * similarity_threshold * cache_factor - rank_threshold_long = (max_rank - min_rank) * similarity_threshold_long * cache_factor - rank_threshold = ( - max_rank - if rank_threshold > max_rank - else min_rank - if rank_threshold < min_rank - else rank_threshold - ) - rank_threshold_long = ( - max_rank - if rank_threshold_long > max_rank - else min_rank - if rank_threshold_long < min_rank - else rank_threshold_long - ) - - if cache_data_list is None or len(cache_data_list) == 0: - rank_pre = -1.0 - else: - cache_data_dict = {'search_result': cache_data_list[0]} - rank_pre = chat_cache.similarity_evaluation.evaluation( - None, - cache_data_dict, - extra_param=context.get("evaluation_func", None), - ) - if rank_pre < rank_threshold: - return - - for cache_data in cache_data_list: - primary_id = cache_data[1] - start_time = time.time() - ret = 
chat_cache.data_manager.get_scalar_data( - cache_data, extra_param=context.get("get_scalar_data", None) - ) - if ret is None: - continue - - if "deps" in context and hasattr(ret.question, "deps"): - eval_query_data = { - "question": context["deps"][0]["data"], - "embedding": None - } - eval_cache_data = { - "question": ret.question.deps[0].data, - "answer": ret.answers[0].answer, - "search_result": cache_data, - "embedding": None, - } - else: - eval_query_data = { - "question": pre_embedding_data, - "embedding": embedding_data, - } - - eval_cache_data = { - "question": ret[0], - "answer": ret[1], - "search_result": cache_data, - "embedding": None - } - rank = chat_cache.similarity_evaluation.evaluation( - eval_query_data, - eval_cache_data, - extra_param=context.get("evaluation_func", None), - ) - - if len(pre_embedding_data) <= 256: - if rank_threshold <= rank: - cache_answers.append((rank, ret[1])) - cache_questions.append((rank, ret[0])) - cache_ids.append((rank, primary_id)) - else: - if rank_threshold_long <= rank: - cache_answers.append((rank, ret[1])) - cache_questions.append((rank, ret[0])) - cache_ids.append((rank, primary_id)) - cache_answers = sorted(cache_answers, key=lambda x: x[0], reverse=True) - cache_questions = sorted(cache_questions, key=lambda x: x[0], reverse=True) - cache_ids = sorted(cache_ids, key=lambda x: x[0], reverse=True) - if len(cache_answers) != 0: - return_message = chat_cache.post_process_messages_func( - [t[1] for t in cache_answers] - ) - return_query = chat_cache.post_process_messages_func( - [t[1] for t in cache_questions] - ) - return_id = chat_cache.post_process_messages_func( - [t[1] for t in cache_ids] - ) - # 更新命中次数 - try: - chat_cache.data_manager.update_hit_count(return_id) - except Exception: - logging.info('update_hit_count except, please check!') - - chat_cache.report.hint_cache() - return cache_data_convert(return_message, return_query) diff --git a/modelcache_mm/config.py b/modelcache_mm/config.py new file mode 100644 index 0000000..69b3246 --- /dev/null +++ b/modelcache_mm/config.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +from typing import Optional, Callable, List +from modelcache.utils.error import CacheError + + +class Config: + + def __init__( + self, + log_time_func: Optional[Callable[[str, float], None]] = None, + similarity_threshold: float = 0.95, + similarity_threshold_long: float = 0.95, + prompts: Optional[List[str]] = None + ): + if similarity_threshold < 0 or similarity_threshold > 1: + raise CacheError( + "Invalid the similarity threshold param, reasonable range: 0-1" + ) + self.log_time_func = log_time_func + self.similarity_threshold = similarity_threshold + self.similarity_threshold_long = similarity_threshold_long + self.prompts = prompts diff --git a/modelcache_mm/core.py b/modelcache_mm/core.py new file mode 100644 index 0000000..2c6c037 --- /dev/null +++ b/modelcache_mm/core.py @@ -0,0 +1,76 @@ +# -*- coding: utf-8 -*- +import atexit +from typing import Optional, List, Any +from modelcache_mm.processor.post import first +from modelcache_mm.similarity_evaluation import ExactMatchEvaluation +from modelcache_mm.similarity_evaluation import SimilarityEvaluation +from modelcache_mm.embedding.string import to_embeddings as string_embedding +from modelcache_mm.report import Report +from modelcache_mm.config import Config +from modelcache_mm.utils.cache_func import cache_all +from modelcache_mm.utils.log import modelcache_log +from modelcache_mm.manager import get_data_manager +from modelcache_mm.manager.data_manager import 
DataManager + + +class Cache: + def __init__(self): + self.has_init = False + self.cache_enable_func = None + self.query_pre_embedding_func = None + self.insert_pre_embedding_func = None + self.embedding_func = None + self.embedding_concurrent_func = None + self.data_manager: Optional[DataManager] = None + self.similarity_evaluation: Optional[SimilarityEvaluation] = None + self.post_process_messages_func = None + self.config = Config() + self.report = Report() + self.next_cache = None + + def init( + self, + cache_enable_func=cache_all, + query_pre_embedding_func=None, + insert_pre_embedding_func=None, + embedding_func=string_embedding, + embedding_concurrent_func=string_embedding, + data_manager: DataManager = get_data_manager(), + similarity_evaluation=ExactMatchEvaluation(), + post_process_messages_func=first, + config=Config(), + next_cache=None, + ): + self.has_init = True + self.cache_enable_func = cache_enable_func + self.query_pre_embedding_func = query_pre_embedding_func + self.insert_pre_embedding_func = insert_pre_embedding_func + self.embedding_func = embedding_func + self.embedding_concurrent_func = embedding_concurrent_func + self.data_manager: DataManager = data_manager + self.similarity_evaluation = similarity_evaluation + self.post_process_messages_func = post_process_messages_func + self.config = config + self.next_cache = next_cache + + @atexit.register + def close(): + try: + self.data_manager.close() + except Exception as e: + modelcache_log.error(e) + + def import_data(self, questions: List[Any], answers: List[Any]) -> None: + self.data_manager.import_data( + questions=questions, + answers=answers, + embedding_datas=[self.embedding_func(question) for question in questions], + ) + + def flush(self): + self.data_manager.flush() + if self.next_cache: + self.next_cache.data_manager.flush() + + +cache = Cache() diff --git a/modelcache_mm/embedding/__init__.py b/modelcache_mm/embedding/__init__.py new file mode 100644 index 0000000..03b6762 --- /dev/null +++ b/modelcache_mm/embedding/__init__.py @@ -0,0 +1,32 @@ +# -*- coding: utf-8 -*- +from modelcache.utils.lazy_import import LazyImport +huggingface = LazyImport("huggingface", globals(), "modelcache.embedding.huggingface") +data2vec = LazyImport("data2vec", globals(), "modelcache.embedding.data2vec") +llmEmb = LazyImport("llmEmb", globals(), "modelcache.embedding.llmEmb") +fasttext = LazyImport("fasttext", globals(), "modelcache.embedding.fasttext") +paddlenlp = LazyImport("paddlenlp", globals(), "modelcache.embedding.paddlenlp") +timm = LazyImport("timm", globals(), "modelcache.embedding.timm") + + +def Huggingface(model="sentence-transformers/all-mpnet-base-v2"): + return huggingface.Huggingface(model) + + +def Data2VecAudio(model="facebook/data2vec-audio-base-960h"): + return data2vec.Data2VecAudio(model) + + +def LlmEmb2vecAudio(): + return llmEmb.LlmEmb2Vec() + + +def FastText(model="en", dim=None): + return fasttext.FastText(model, dim) + + +def PaddleNLP(model="ernie-3.0-medium-zh"): + return paddlenlp.PaddleNLP(model) + + +def Timm(model="resnet50", device="default"): + return timm.Timm(model, device) diff --git a/modelcache_mm/embedding/base.py b/modelcache_mm/embedding/base.py new file mode 100644 index 0000000..4b316aa --- /dev/null +++ b/modelcache_mm/embedding/base.py @@ -0,0 +1,17 @@ +# -*- coding: utf-8 -*- +from abc import ABCMeta, abstractmethod + + +class BaseEmbedding(metaclass=ABCMeta): + """ + _Embedding base. 
+ """ + + @abstractmethod + def to_embeddings(self, data, **kwargs): + pass + + @property + @abstractmethod + def dimension(self) -> int: + return 0 diff --git a/modelcache_mm/embedding/data2vec.py b/modelcache_mm/embedding/data2vec.py new file mode 100644 index 0000000..274eb5a --- /dev/null +++ b/modelcache_mm/embedding/data2vec.py @@ -0,0 +1,108 @@ +# -*- coding: utf-8 -*- +import os +import time +import numpy as np +import torch +from transformers import BertTokenizer, BertModel +from modelcache.embedding.base import BaseEmbedding + + +def mean_pooling(model_output, attention_mask): + token_embeddings = model_output[0] # First element of model_output contains all token embeddings + input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() + return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) + + +class Data2VecAudio(BaseEmbedding): + def __init__(self, model: str = "sentence-transformers/all-MiniLM-L6-v2"): + current_dir = os.path.dirname(os.path.abspath(__file__)) + parent_dir = os.path.dirname(current_dir) + model_dir = os.path.dirname(parent_dir) + model = os.path.join(model_dir, 'model/text2vec-base-chinese/') + + try: + self.__dimension = self.model.config.hidden_size + except Exception: + from transformers import AutoConfig + + config = AutoConfig.from_pretrained(model) + self.__dimension = config.hidden_size + + self.device = 'cuda' if torch.cuda.is_available() else 'cpu' + self.tokenizer = BertTokenizer.from_pretrained(model, local_files_only=True) + self.model = BertModel.from_pretrained(model, local_files_only=True) + + def to_embeddings(self, data, **_): + encoded_input = self.tokenizer(data, padding=True, truncation=True, return_tensors='pt') + num_tokens = sum(map(len, encoded_input['input_ids'])) + + if num_tokens <= 512: + with torch.no_grad(): + encoded_input = {k: v.to(self.device) for k, v in encoded_input.items()} + model_output = self.model(**encoded_input) + sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) + sentence_embeddings = sentence_embeddings.squeeze(0).detach().cpu().numpy() + embedding_array = np.array(sentence_embeddings).astype("float32") + return embedding_array + else: + window_size = 510 + start = 0 + input_ids = encoded_input['input_ids'] + input_ids = input_ids[:, 1:-1] + start_token = self.tokenizer.cls_token + end_token = self.tokenizer.sep_token + start_token_id = self.tokenizer.convert_tokens_to_ids(start_token) + end_token_id = self.tokenizer.convert_tokens_to_ids(end_token) + begin_element = torch.tensor([[start_token_id]]) + end_element = torch.tensor([[end_token_id]]) + + embedding_array_list = list() + while start < num_tokens: + # Calculate the ending position of the sliding window. + end = start + window_size + # If the ending position exceeds the length, adjust it to the length. + if end > num_tokens: + end = num_tokens + # Retrieve the data within the sliding window. + input_ids_window = input_ids[:, start:end] + # Insert a new element at position 0. + input_ids_window = torch.cat([begin_element, input_ids_window[:, 0:]], dim=1) + # Insert a new element at the last position. 
+ input_ids_window = torch.cat([input_ids_window, end_element], dim=1) + input_ids_window_length = sum(map(len, input_ids_window)) + token_type_ids = torch.tensor([[0] * input_ids_window_length]) + attention_mask = torch.tensor([[1] * input_ids_window_length]) + + # Concatenate new input_ids + encoded_input_window = {'input_ids': input_ids_window, 'token_type_ids': token_type_ids, + 'attention_mask': attention_mask} + with torch.no_grad(): + encoded_input_window = {k: v.to(self.device) for k, v in encoded_input_window.items()} + model_output_window = self.model(**encoded_input_window) + + sentence_embeddings_window = mean_pooling(model_output_window, encoded_input_window['attention_mask']) + sentence_embeddings_window = sentence_embeddings_window.squeeze(0).detach().cpu().numpy() + embedding_array_window = np.array(sentence_embeddings_window).astype("float32") + embedding_array_list.append(embedding_array_window) + start = end + + embedding_array = np.mean(embedding_array_list, axis=0) + return embedding_array + + def post_proc(self, token_embeddings, inputs): + attention_mask = inputs["attention_mask"] + input_mask_expanded = ( + attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() + ) + sentence_embs = torch.sum( + token_embeddings * input_mask_expanded, 1 + ) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) + return sentence_embs + + @property + def dimension(self): + """Embedding dimension. + + :return: embedding dimension + """ + return self.__dimension diff --git a/modelcache_mm/embedding/fasttext.py b/modelcache_mm/embedding/fasttext.py new file mode 100644 index 0000000..adbc38c --- /dev/null +++ b/modelcache_mm/embedding/fasttext.py @@ -0,0 +1,27 @@ +# -*- coding: utf-8 -*- +import numpy as np +import os +from modelcache.utils import import_fasttext +from modelcache.embedding.base import BaseEmbedding +import_fasttext() +import fasttext.util + + +class FastText(BaseEmbedding): + def __init__(self, model: str = "en", dim: int = None): + self.model_path = os.path.abspath(fasttext.util.download_model(model)) + self.ft = fasttext.load_model(self.model_path) + + if dim: + fasttext.util.reduce_model(self.ft, dim) + self.__dimension = self.ft.get_dimension() + + def to_embeddings(self, data, **_): + assert isinstance(data, str), "Only allow string as input." 
+ emb = self.ft.get_sentence_vector(data) + return np.array(emb).astype("float32") + + @property + def dimension(self): + return self.__dimension + diff --git a/modelcache_mm/embedding/huggingface.py b/modelcache_mm/embedding/huggingface.py new file mode 100644 index 0000000..8c1434d --- /dev/null +++ b/modelcache_mm/embedding/huggingface.py @@ -0,0 +1,64 @@ +# -*- coding: utf-8 -*- +import numpy as np + +from modelcache.utils import import_huggingface, import_torch +from modelcache.embedding.base import BaseEmbedding + +import_torch() +import_huggingface() + +import torch # pylint: disable=C0413 +from transformers import AutoTokenizer, AutoModel # pylint: disable=C0413 + + +class Huggingface(BaseEmbedding): + def __init__(self, model: str = "sentence-transformers/all-MiniLM-L6-v2"): + self.model = AutoModel.from_pretrained(model, local_files_only=True) + self.model.eval() + + # self.tokenizer = AutoTokenizer.from_pretrained(model) + self.tokenizer = AutoTokenizer.from_pretrained(model, local_files_only=True) + if not self.tokenizer.pad_token: + self.tokenizer.pad_token = "[PAD]" + try: + self.__dimension = self.model.config.hidden_size + except Exception: # pylint: disable=W0703 + from transformers import AutoConfig # pylint: disable=C0415 + + config = AutoConfig.from_pretrained(model) + self.__dimension = config.hidden_size + + def to_embeddings(self, data, **_): + """Generate embedding given text input + + :param data: text in string. + :type data: str + + :return: a text embedding in shape of (dim,). + """ + if not isinstance(data, list): + data = [data] + inputs = self.tokenizer( + data, padding=True, truncation=True, return_tensors="pt" + ) + outs = self.model(**inputs).last_hidden_state + emb = self.post_proc(outs, inputs).squeeze(0).detach().numpy() + return np.array(emb).astype("float32") + + def post_proc(self, token_embeddings, inputs): + attention_mask = inputs["attention_mask"] + input_mask_expanded = ( + attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() + ) + sentence_embs = torch.sum( + token_embeddings * input_mask_expanded, 1 + ) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) + return sentence_embs + + @property + def dimension(self): + """Embedding dimension. + + :return: embedding dimension + """ + return self.__dimension diff --git a/modelcache_mm/embedding/llmEmb.py b/modelcache_mm/embedding/llmEmb.py new file mode 100644 index 0000000..096552c --- /dev/null +++ b/modelcache_mm/embedding/llmEmb.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- +import numpy as np +from modelcache.embedding.base import BaseEmbedding +from transformers import AutoTokenizer +from transformers import AutoConfig + + +class LlmEmb2Vec(BaseEmbedding): + def __init__(self): + + self.model_name = '' # 13b-mft-embedding.npy + model_path = '' # .npy file storage path + model_file = model_path + self.model_name # .npy file + config = AutoConfig.from_pretrained(model_path) + dimension = config.hidden_size + self.__dimension = dimension + self.model = np.load(model_file) + self.tokenizer = AutoTokenizer.from_pretrained(model_path, local_files_only=True) + + def to_embeddings(self, data, **_): + """Generate embedding given text input + + :param data: text in string. + :return: a text embedding in shape of (dim,). + """ + input_ids = self.tokenizer.encode(data, add_special_tokens=True) + embedding_array = self.model[input_ids].mean(axis=0) + return embedding_array + + def post_proc(self, token_embeddings, inputs): + pass + + @property + def dimension(self): + """Embedding dimension. 
+ :return: embedding dimension + """ + return self.__dimension diff --git a/modelcache_mm/embedding/onnx.py b/modelcache_mm/embedding/onnx.py new file mode 100644 index 0000000..9df64ff --- /dev/null +++ b/modelcache_mm/embedding/onnx.py @@ -0,0 +1,70 @@ +# -*- coding: utf-8 -*- +import numpy as np + +from modelcache.embedding.base import BaseEmbedding +from modelcache.utils import ( + import_onnxruntime, + import_huggingface_hub, + import_huggingface, +) + +import_huggingface() +import_onnxruntime() +import_huggingface_hub() + +from transformers import AutoTokenizer, AutoConfig # pylint: disable=C0413 +import onnxruntime +from modelcache.utils.env_config import get_onnx_tokenizer_path, get_onnx_model + + +class Onnx(BaseEmbedding): + + def __init__(self, model="modelcache_open/paraphrase-albert-onnx"): + # 本地加载 + onnx_tokenizer = get_onnx_tokenizer_path() + self.tokenizer = AutoTokenizer.from_pretrained(onnx_tokenizer, local_files_only=True) + # 本地加载 + onnx_model = get_onnx_model() + self.ort_session = onnxruntime.InferenceSession(onnx_model) + + config = AutoConfig.from_pretrained(onnx_tokenizer, local_files_only=True) + self.__dimension = config.hidden_size + + def to_embeddings(self, data, **_): + """Generate embedding given text input. + + :param data: text in string. + :type data: str + + :return: a text embedding in shape of (dim,). + """ + encoded_text = self.tokenizer.encode_plus(data, padding="max_length") + ort_inputs = { + "input_ids": np.array(encoded_text["input_ids"]).reshape(1, -1), + "attention_mask": np.array(encoded_text["attention_mask"]).reshape(1, -1), + "token_type_ids": np.array(encoded_text["token_type_ids"]).reshape(1, -1), + } + + ort_outputs = self.ort_session.run(None, ort_inputs) + ort_feat = ort_outputs[0] + emb = self.post_proc(ort_feat, ort_inputs["attention_mask"]) + return emb.flatten() + + def post_proc(self, token_embeddings, attention_mask): + input_mask_expanded = ( + np.expand_dims(attention_mask, -1) + .repeat(token_embeddings.shape[-1], -1) + .astype(float) + ) + sentence_embs = np.sum(token_embeddings * input_mask_expanded, 1) / np.maximum( + input_mask_expanded.sum(1), 1e-9 + ) + return sentence_embs + + @property + def dimension(self): + """Embedding dimension. + + :return: embedding dimension + """ + return self.__dimension diff --git a/modelcache_mm/embedding/paddlenlp.py b/modelcache_mm/embedding/paddlenlp.py new file mode 100644 index 0000000..4b6ccbd --- /dev/null +++ b/modelcache_mm/embedding/paddlenlp.py @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- +import numpy as np + +from modelcache.embedding.base import BaseEmbedding +from modelcache.utils import import_paddlenlp, import_paddle + +import_paddle() +import_paddlenlp() + + +import paddle # pylint: disable=C0413 +from paddlenlp.transformers import AutoModel, AutoTokenizer # pylint: disable=C0413 + + +class PaddleNLP(BaseEmbedding): + def __init__(self, model: str = "ernie-3.0-medium-zh"): + self.model = AutoModel.from_pretrained(model) + self.model.eval() + + self.tokenizer = AutoTokenizer.from_pretrained(model) + if not self.tokenizer.pad_token: + self.tokenizer.pad_token = "" + self.__dimension = None + + def to_embeddings(self, data, **_): + """Generate embedding given text input + + :param data: text in string. + :type data: str + + :return: a text embedding in shape of (dim,). 
+ """ + if not isinstance(data, list): + data = [data] + inputs = self.tokenizer( + data, padding=True, truncation=True, return_tensors="pd" + ) + outs = self.model(**inputs)[0] + emb = self.post_proc(outs, inputs).squeeze(0).detach().numpy() + return np.array(emb).astype("float32") + + def post_proc(self, token_embeddings, inputs): + attention_mask = paddle.ones(inputs["token_type_ids"].shape) + input_mask_expanded = ( + attention_mask.unsqueeze(-1).expand(token_embeddings.shape).astype("float32") + ) + sentence_embs = paddle.sum( + token_embeddings * input_mask_expanded, 1 + ) / paddle.clip(input_mask_expanded.sum(1), min=1e-9) + return sentence_embs + + @property + def dimension(self): + """Embedding dimension. + + :return: embedding dimension + """ + if not self.__dimension: + self.__dimension = len(self.to_embeddings("foo")) + return self.__dimension diff --git a/modelcache_mm/embedding/string.py b/modelcache_mm/embedding/string.py new file mode 100644 index 0000000..4fd08e7 --- /dev/null +++ b/modelcache_mm/embedding/string.py @@ -0,0 +1,5 @@ +# -*- coding: utf-8 -*- + + +def to_embeddings(data, **_): + return data diff --git a/modelcache_mm/embedding/timm.py b/modelcache_mm/embedding/timm.py new file mode 100644 index 0000000..5241e03 --- /dev/null +++ b/modelcache_mm/embedding/timm.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- +import numpy as np + +from modelcache.utils import import_timm, import_torch, import_pillow +from modelcache.embedding.base import BaseEmbedding + +import_torch() +import_timm() +import_pillow() + +import torch # pylint: disable=C0413 +from timm.models import create_model # pylint: disable=C0413 +from timm.data import create_transform, resolve_data_config # pylint: disable=C0413 +from PIL import Image # pylint: disable=C0413 + + +class Timm(BaseEmbedding): + def __init__(self, model: str = "resnet18", device: str = "default"): + if device == "default": + self.device = "cuda" if torch.cuda.is_available() else "cpu" + else: + self.device = device + self.model_name = model + self.model = create_model(model_name=model, pretrained=True) + self.model.eval() + + try: + self.__dimension = self.model.embed_dim + except Exception: # pylint: disable=W0703 + self.__dimension = None + + def to_embeddings(self, data, skip_preprocess: bool = False, **_): + if not skip_preprocess: + data = self.preprocess(data) + if data.dim() == 3: + data = data.unsqueeze(0) + feats = self.model.forward_features(data) + emb = self.post_proc(feats).squeeze(0).detach().numpy() + + return np.array(emb).astype("float32") + + def post_proc(self, features): + features = features.to("cpu") + if features.dim() == 3: + features = features[:, 0] + if features.dim() == 4: + global_pool = torch.nn.AdaptiveAvgPool2d(1) + features = global_pool(features) + features = features.flatten(1) + assert features.dim() == 2, f"Invalid output dim {features.dim()}" + return features + + def preprocess(self, image_path): + data_cfg = resolve_data_config(self.model.pretrained_cfg) + transform = create_transform(**data_cfg) + + image = Image.open(image_path).convert("RGB") + image_tensor = transform(image) + return image_tensor + + @property + def dimension(self): + """Embedding dimension. 
+ + :return: embedding dimension + """ + if not self.__dimension: + input_size = self.model.pretrained_cfg["input_size"] + dummy_input = torch.rand((1,) + input_size) + feats = self.to_embeddings(dummy_input, skip_preprocess=True) + self.__dimension = feats.shape[0] + return self.__dimension diff --git a/modelcache_mm/manager/__init__.py b/modelcache_mm/manager/__init__.py new file mode 100644 index 0000000..0b2e8be --- /dev/null +++ b/modelcache_mm/manager/__init__.py @@ -0,0 +1,5 @@ +# -*- coding: utf-8 -*- +from modelcache_mm.manager.scalar_data import CacheBase +from modelcache_mm.manager.vector_data import VectorBase +from modelcache_mm.manager.object_data import ObjectBase +from modelcache_mm.manager.factory import get_data_manager diff --git a/modelcache_mm/manager_mm/data_manager.py b/modelcache_mm/manager/data_manager.py similarity index 67% rename from modelcache_mm/manager_mm/data_manager.py rename to modelcache_mm/manager/data_manager.py index 2627446..503dadb 100644 --- a/modelcache_mm/manager_mm/data_manager.py +++ b/modelcache_mm/manager/data_manager.py @@ -7,28 +7,24 @@ import cachetools from abc import abstractmethod, ABCMeta from typing import List, Any, Optional, Union -from modelcache.manager.scalar_data.base import ( +from modelcache_mm.manager.scalar_data.base import ( CacheStorage, CacheData, DataType, Answer, Question ) -from modelcache.utils.error import CacheError, ParamError -from modelcache.manager.vector_data.base import VectorBase, VectorData -from modelcache.manager.object_data.base import ObjectBase -from modelcache.manager.eviction import EvictionBase -from modelcache.manager.eviction_manager import EvictionManager -from modelcache.utils.log import modelcache_log +from modelcache_mm.utils.error import CacheError, ParamError +from modelcache_mm.manager.vector_data.base import VectorBase, VectorData +from modelcache_mm.manager.object_data.base import ObjectBase +# from modelcache.manager.eviction import EvictionBase +# from modelcache.manager.eviction_manager import EvictionManager +from modelcache_mm.utils.log import modelcache_log class DataManager(metaclass=ABCMeta): """DataManager manage the cache data, including save and search""" - # @abstractmethod - # def save(self, question, answer, embedding_data, **kwargs): - # pass - @abstractmethod def save(self, text, image_url, image_id, answer, embedding, **kwargs): pass @@ -38,8 +34,10 @@ def save_query_resp(self, query_resp_dict, **kwargs): pass @abstractmethod - def import_data(self, texts: List[Any], image_urls: List[Any], image_ids: List[Any], answers: List[Answer], - embeddings: List[Any], model: Any, iat_type: Any): + def import_data( + self, texts: List[Any], image_urls: List[Any], image_ids: List[Any], answers: List[Answer], + embeddings: List[Any], model: Any, iat_type: Any + ): pass @abstractmethod @@ -92,19 +90,23 @@ def init(self): f"You don't have permission to access this file <{self.data_path}>." 
) - # def save(self, question, answer, embedding_data, **kwargs): - # if isinstance(question, Question): - # question = question.content - # self.data[embedding_data] = (question, answer, embedding_data) - def save(self, text, image_url, image_id, answer, embedding, **kwargs): + # if isinstance(question, Question): + # question = question.content + # self.data[embedding_data] = (question, answer, embedding_data) pass def save_query_resp(self, query_resp_dict, **kwargs): pass - def import_data(self, texts: List[Any], image_urls: List[Any], image_ids: List[Any], answers: List[Answer], - embeddings: List[Any], model: Any, iat_type: Any): + def import_data( + self, texts: List[Any], image_urls: List[Any], image_ids: List[Any], answers: List[Answer], + embeddings: List[Any], model: Any, iat_type: Any + ): + # if len(questions) != len(answers) or len(questions) != len(embedding_datas): + # raise ParamError("Make sure that all parameters have the same length") + # for i, embedding_data in enumerate(embedding_datas): + # self.data[embedding_data] = (questions[i], answers[i], embedding_datas[i]) pass def get_scalar_data(self, res_data, **kwargs) -> CacheData: @@ -160,15 +162,13 @@ def __init__( self.v = v self.o = o - # def save(self, question, answer, embedding_data, **kwargs): - # model = kwargs.pop("model", None) - # self.import_data([question], [answer], [embedding_data], model) - def save(self, text, image_url, image_id, answer, embedding, **kwargs): + # model = kwargs.pop("model", None) + # self.import_data([question], [answer], [embedding_data], model) + model = kwargs.pop("model", None) mm_type = kwargs.pop("mm_type", None) - self.import_data([text], [image_url], [image_id], [answer], - [embedding], model, mm_type) + self.import_data([text], [image_url], [image_id], [answer], [embedding], model, mm_type) def save_query_resp(self, query_resp_dict, **kwargs): save_query_start_time = time.time() @@ -198,8 +198,42 @@ def _process_question_data(self, question: Union[str, Question]): return Question(question) - def import_data(self, texts: List[Any], image_urls: List[Any], image_ids: List[Any], answers: List[Answer], - embeddings: List[Any], model: Any, iat_type: Any): + # def import_data( + # self, questions: List[Any], answers: List[Answer], embedding_datas: List[Any], model: Any + # ): + # if len(questions) != len(answers) or len(questions) != len(embedding_datas): + # raise ParamError("Make sure that all parameters have the same length") + # cache_datas = [] + # + # embedding_datas = [ + # normalize(embedding_data) for embedding_data in embedding_datas + # ] + # + # for i, embedding_data in enumerate(embedding_datas): + # if self.o is not None: + # ans = self._process_answer_data(answers[i]) + # else: + # ans = answers[i] + # + # question = questions[i] + # embedding_data = embedding_data.astype("float32") + # cache_datas.append([ans, question, embedding_data, model]) + # + # ids = self.s.batch_insert(cache_datas) + # logging.info('ids: {}'.format(ids)) + # self.v.mul_add( + # [ + # VectorData(id=ids[i], data=embedding_data) + # for i, embedding_data in enumerate(embedding_datas) + # ], + # model + # + # ) + + def import_data( + self, texts: List[Any], image_urls: List[Any], image_ids: List[Any], answers: List[Answer], + embeddings: List[Any], model: Any, mm_type: Any + ): if len(texts) != len(answers): raise ParamError("Make sure that all parameters have the same length") cache_datas = [] @@ -220,16 +254,24 @@ def import_data(self, texts: List[Any], image_urls: List[Any], image_ids: List[A # 
iat_embedding = embedding.astype("float32") cache_datas.append([ans, text, image_url, image_id, model]) + # ids = self.s.batch_multimodal_insert(cache_datas) ids = self.s.batch_insert(cache_datas) + # self.v.multimodal_add( self.v.add( [ VectorData(id=ids[i], data=embedding) for i, embedding in enumerate(embeddings) ], model, - iat_type + mm_type ) + # def get_scalar_data(self, res_data, **kwargs) -> Optional[CacheData]: + # cache_data = self.s.get_data_by_id(res_data[1]) + # if cache_data is None: + # return None + # return cache_data + def get_scalar_data(self, res_data, **kwargs) -> Optional[CacheData]: cache_data = self.s.get_data_by_id(res_data[1]) if cache_data is None: @@ -242,11 +284,29 @@ def update_hit_count(self, primary_id, **kwargs): def hit_cache_callback(self, res_data, **kwargs): self.eviction_base.get(res_data[1]) + # def search(self, embedding_data, **kwargs): + # model = kwargs.pop("model", None) + # embedding_data = normalize(embedding_data) + # top_k = kwargs.get("top_k", -1) + # return self.v.search(data=embedding_data, top_k=top_k, model=model) + def search(self, embedding_data, **kwargs): model = kwargs.pop("model", None) + mm_type = kwargs.pop("mm_type", None) embedding_data = normalize(embedding_data) top_k = kwargs.get("top_k", -1) - return self.v.search(data=embedding_data, top_k=top_k, model=model) + try: + search_result = self.v.search(data=embedding_data, top_k=top_k, model=model, mm_type=mm_type) + except Exception as e: + try: + message = str(e) + if "no such index" in message: + print('no such index异常,创建索引...') + self.v.create(model, mm_type) + search_result = self.v.search(data=embedding_data, top_k=top_k, model=model, mm_type=mm_type) + except Exception as e: + raise e + return search_result def delete(self, id_list, **kwargs): model = kwargs.pop("model", None) @@ -264,8 +324,8 @@ def delete(self, id_list, **kwargs): return {'status': 'success', 'milvus': 'delete_count: '+str(v_delete_count), 'mysql': 'delete_count: '+str(s_delete_count)} - def create_index(self, model, mm_type, **kwargs): - return self.v.create(model, mm_type) + def create_index(self, model, type, **kwargs): + return self.v.create(model, type) def truncate(self, model_name): # drop vector base data diff --git a/modelcache_mm/manager_mm/eviction/__init__.py b/modelcache_mm/manager/eviction/__init__.py similarity index 100% rename from modelcache_mm/manager_mm/eviction/__init__.py rename to modelcache_mm/manager/eviction/__init__.py diff --git a/modelcache_mm/manager_mm/eviction/base.py b/modelcache_mm/manager/eviction/base.py similarity index 100% rename from modelcache_mm/manager_mm/eviction/base.py rename to modelcache_mm/manager/eviction/base.py diff --git a/modelcache_mm/manager_mm/eviction/manager.py b/modelcache_mm/manager/eviction/manager.py similarity index 100% rename from modelcache_mm/manager_mm/eviction/manager.py rename to modelcache_mm/manager/eviction/manager.py diff --git a/modelcache_mm/manager_mm/eviction/memory_cache.py b/modelcache_mm/manager/eviction/memory_cache.py similarity index 100% rename from modelcache_mm/manager_mm/eviction/memory_cache.py rename to modelcache_mm/manager/eviction/memory_cache.py diff --git a/modelcache_mm/manager_mm/eviction_manager.py b/modelcache_mm/manager/eviction_manager.py similarity index 100% rename from modelcache_mm/manager_mm/eviction_manager.py rename to modelcache_mm/manager/eviction_manager.py diff --git a/modelcache_mm/manager_mm/factory.py b/modelcache_mm/manager/factory.py similarity index 85% rename from 
modelcache_mm/manager_mm/factory.py rename to modelcache_mm/manager/factory.py index 6d834d6..1f43b4a 100644 --- a/modelcache_mm/manager_mm/factory.py +++ b/modelcache_mm/manager/factory.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- from typing import Union, Callable -from modelcache_mm.manager_mm import CacheBase, VectorBase, ObjectBase -from modelcache_mm.manager_mm.data_manager import SSDataManager, MapDataManager +from modelcache_mm.manager import CacheBase, VectorBase, ObjectBase +from modelcache_mm.manager.data_manager import SSDataManager, MapDataManager def get_data_manager( @@ -16,7 +16,6 @@ def get_data_manager( ): if not cache_base and not vector_base: return MapDataManager(data_path, max_size, get_data_container) - if isinstance(cache_base, str): cache_base = CacheBase(name=cache_base) if isinstance(vector_base, str): diff --git a/modelcache_mm/manager_mm/object_data/__init__.py b/modelcache_mm/manager/object_data/__init__.py similarity index 100% rename from modelcache_mm/manager_mm/object_data/__init__.py rename to modelcache_mm/manager/object_data/__init__.py diff --git a/modelcache_mm/manager_mm/object_data/base.py b/modelcache_mm/manager/object_data/base.py similarity index 100% rename from modelcache_mm/manager_mm/object_data/base.py rename to modelcache_mm/manager/object_data/base.py diff --git a/modelcache_mm/manager_mm/scalar_data/__init__.py b/modelcache_mm/manager/scalar_data/__init__.py similarity index 53% rename from modelcache_mm/manager_mm/scalar_data/__init__.py rename to modelcache_mm/manager/scalar_data/__init__.py index b63c430..e499db4 100644 --- a/modelcache_mm/manager_mm/scalar_data/__init__.py +++ b/modelcache_mm/manager/scalar_data/__init__.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- -from modelcache.utils.lazy_import import LazyImport +from modelcache_mm.utils.lazy_import import LazyImport scalar_manager = LazyImport( - "scalar_manager", globals(), "modelcache.manager.scalar_data.manager" + "scalar_manager", globals(), "modelcache_mm.manager.scalar_data.manager" ) diff --git a/modelcache_mm/manager_mm/scalar_data/base.py b/modelcache_mm/manager/scalar_data/base.py similarity index 100% rename from modelcache_mm/manager_mm/scalar_data/base.py rename to modelcache_mm/manager/scalar_data/base.py diff --git a/modelcache_mm/manager_mm/scalar_data/manager.py b/modelcache_mm/manager/scalar_data/manager.py similarity index 74% rename from modelcache_mm/manager_mm/scalar_data/manager.py rename to modelcache_mm/manager/scalar_data/manager.py index 4c02c45..3781787 100644 --- a/modelcache_mm/manager_mm/scalar_data/manager.py +++ b/modelcache_mm/manager/scalar_data/manager.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -from modelcache.utils import import_sql_client -from modelcache.utils.error import NotFoundError +from modelcache_mm.utils import import_sql_client +from modelcache_mm.utils.error import NotFoundError SQL_URL = {"sqlite": "./sqlite.db"} @@ -19,12 +19,12 @@ def __init__(self): def get(name, **kwargs): if name in ["mysql", "oceanbase"]: - from modelcache.manager.scalar_data.sql_storage import SQLStorage + from modelcache_mm.manager.scalar_data.sql_storage import SQLStorage config = kwargs.get("config") import_sql_client(name) cache_base = SQLStorage(db_type=name, config=config) elif name == 'sqlite': - from modelcache.manager.scalar_data.sql_storage_sqlite import SQLStorage + from modelcache_mm.manager.scalar_data.sql_storage_sqlite import SQLStorage sql_url = kwargs.get("sql_url", SQL_URL[name]) cache_base = SQLStorage(db_type=name, url=sql_url) else: diff 
--git a/modelcache_mm/manager_mm/scalar_data/sql_storage.py b/modelcache_mm/manager/scalar_data/sql_storage.py similarity index 70% rename from modelcache_mm/manager_mm/scalar_data/sql_storage.py rename to modelcache_mm/manager/scalar_data/sql_storage.py index e7f2b7a..8ddd5ff 100644 --- a/modelcache_mm/manager_mm/scalar_data/sql_storage.py +++ b/modelcache_mm/manager/scalar_data/sql_storage.py @@ -6,7 +6,7 @@ import json import base64 from typing import List -from modelcache.manager.scalar_data.base import CacheStorage, CacheData +from modelcache_mm.manager.scalar_data.base import CacheStorage, CacheData from DBUtils.PooledDB import PooledDB @@ -34,6 +34,29 @@ def __init__( def create(self): pass + # def _insert(self, data: List): + # answer = data[0] + # text = data[1] + # image_url = data[2] + # image_id = data[3] + # model = data[4] + # answer_type = 0 + # + # table_name = "multimodal_answer" + # insert_sql = "INSERT INTO {} (question_text, image_url, image_id, answer, answer_type, model) VALUES (%s, %s, %s, %s, %s, %s)".format(table_name) + # conn = self.pool.connection() + # try: + # with conn.cursor() as cursor: + # # data insert operation + # values = (text, image_url, image_id, answer, answer_type, model) + # cursor.execute(insert_sql, values) + # conn.commit() + # id = cursor.lastrowid + # finally: + # # Close the connection and return it back to the connection pool + # conn.close() + # return id + def _insert(self, data: List): answer = data[0] text = data[1] @@ -42,19 +65,21 @@ def _insert(self, data: List): model = data[4] answer_type = 0 - table_name = "multimodal_answer" + table_name = "open_cache_mm_answer" insert_sql = "INSERT INTO {} (question_text, image_url, image_id, answer, answer_type, model) VALUES (%s, %s, %s, %s, %s, %s)".format(table_name) + conn = self.pool.connection() try: with conn.cursor() as cursor: - # data insert operation + # insert data operation values = (text, image_url, image_id, answer, answer_type, model) cursor.execute(insert_sql, values) conn.commit() id = cursor.lastrowid finally: - # Close the connection and return it back to the connection pool + # Close the connection and return it to the connection pool. 
conn.close() + print('insert retrun id: {}'.format(id)) return id def batch_insert(self, all_data: List[CacheData]): @@ -89,12 +114,31 @@ def insert_query_resp(self, query_resp, **kwargs): # 关闭连接,将连接返回给连接池 conn.close() + # def get_data_by_id(self, key: int): + # table_name = "cache_codegpt_answer" + # query_sql = "select question, answer, embedding_data, model from {} where id={}".format(table_name, key) + # conn_start = time.time() + # conn = self.pool.connection() + # + # search_start = time.time() + # try: + # with conn.cursor() as cursor: + # # 执行数据库操作 + # cursor.execute(query_sql) + # resp = cursor.fetchone() + # finally: + # # 关闭连接,将连接返回给连接池 + # conn.close() + # + # if resp is not None and len(resp) == 4: + # return resp + # else: + # return None + def get_data_by_id(self, key: int): - table_name = "cache_codegpt_answer" - query_sql = "select question, answer, embedding_data, model from {} where id={}".format(table_name, key) - conn_start = time.time() + table_name = "open_cache_mm_answer" + query_sql = "select question_text, image_url, image_id, answer, model from {} where id={}".format(table_name, key) conn = self.pool.connection() - search_start = time.time() try: with conn.cursor() as cursor: @@ -104,8 +148,9 @@ def get_data_by_id(self, key: int): finally: # 关闭连接,将连接返回给连接池 conn.close() + print('ob_search_cost_time: {}'.format(time.time() - search_start)) - if resp is not None and len(resp) == 4: + if resp is not None and len(resp) == 5: return resp else: return None diff --git a/modelcache_mm/manager_mm/scalar_data/sql_storage_sqlite.py b/modelcache_mm/manager/scalar_data/sql_storage_sqlite.py similarity index 100% rename from modelcache_mm/manager_mm/scalar_data/sql_storage_sqlite.py rename to modelcache_mm/manager/scalar_data/sql_storage_sqlite.py diff --git a/modelcache_mm/manager_mm/vector_data/__init__.py b/modelcache_mm/manager/vector_data/__init__.py similarity index 58% rename from modelcache_mm/manager_mm/vector_data/__init__.py rename to modelcache_mm/manager/vector_data/__init__.py index 1bf0642..44c30ee 100644 --- a/modelcache_mm/manager_mm/vector_data/__init__.py +++ b/modelcache_mm/manager/vector_data/__init__.py @@ -1,8 +1,8 @@ # -*- coding: utf-8 -*- -from modelcache.utils.lazy_import import LazyImport +from modelcache_mm.utils.lazy_import import LazyImport vector_manager = LazyImport( - "vector_manager", globals(), "modelcache.manager_mm.vector_data.manager" + "vector_manager", globals(), "modelcache_mm.manager.vector_data.manager" ) diff --git a/modelcache_mm/manager_mm/vector_data/base.py b/modelcache_mm/manager/vector_data/base.py similarity index 66% rename from modelcache_mm/manager_mm/vector_data/base.py rename to modelcache_mm/manager/vector_data/base.py index b9692f3..98151a8 100644 --- a/modelcache_mm/manager_mm/vector_data/base.py +++ b/modelcache_mm/manager/vector_data/base.py @@ -15,11 +15,19 @@ class VectorBase(ABC): """VectorBase: base vector store interface""" @abstractmethod - def add(self, datas: List[VectorData], model=None): + def add(self, datas: List[VectorData], model=None, mm_type=None): + pass + + # @abstractmethod + # def search(self, data: np.ndarray, top_k: int, model): + # pass + + @abstractmethod + def search(self, data: np.ndarray, top_k: int, model, mm_type): pass @abstractmethod - def search(self, data: np.ndarray, top_k: int, model): + def create(self, model=None, mm_type=None): pass @abstractmethod diff --git a/modelcache_mm/manager_mm/vector_data/faiss.py b/modelcache_mm/manager/vector_data/faiss.py similarity index 100% rename 
from modelcache_mm/manager_mm/vector_data/faiss.py rename to modelcache_mm/manager/vector_data/faiss.py diff --git a/modelcache_mm/manager_mm/vector_data/manager.py b/modelcache_mm/manager/vector_data/manager.py similarity index 97% rename from modelcache_mm/manager_mm/vector_data/manager.py rename to modelcache_mm/manager/vector_data/manager.py index c399a88..cc3f4b1 100644 --- a/modelcache_mm/manager_mm/vector_data/manager.py +++ b/modelcache_mm/manager/vector_data/manager.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -from modelcache.utils.error import NotFoundError, ParamError +from modelcache_mm.utils.error import NotFoundError, ParamError TOP_K = 1 FAISS_INDEX_PATH = "faiss.index" @@ -69,7 +69,7 @@ def get(name, **kwargs): local_data=local_data ) elif name == "redis": - from modelcache_mm.manager_mm.vector_data.redis import RedisVectorStore + from modelcache_mm.manager.vector_data.redis import RedisVectorStore redis_config = kwargs.get("redis_config") mm_dimension = kwargs.get("mm_dimension", DIMENSION) diff --git a/modelcache_mm/manager_mm/vector_data/milvus.py b/modelcache_mm/manager/vector_data/milvus.py similarity index 100% rename from modelcache_mm/manager_mm/vector_data/milvus.py rename to modelcache_mm/manager/vector_data/milvus.py diff --git a/modelcache_mm/manager_mm/vector_data/redis.py b/modelcache_mm/manager/vector_data/redis.py similarity index 84% rename from modelcache_mm/manager_mm/vector_data/redis.py rename to modelcache_mm/manager/vector_data/redis.py index d712406..8295005 100644 --- a/modelcache_mm/manager_mm/vector_data/redis.py +++ b/modelcache_mm/manager/vector_data/redis.py @@ -6,11 +6,11 @@ from redis.commands.search.field import VectorField, NumericField from redis.client import Redis -from modelcache_mm.manager_mm.vector_data.base import VectorBase, VectorData -from modelcache.utils import import_redis -from modelcache.utils.log import modelcache_log -from modelcache.utils.index_util import get_mm_index_name -from modelcache.utils.index_util import get_mm_index_prefix +from modelcache_mm.manager.vector_data.base import VectorBase, VectorData +from modelcache_mm.utils import import_redis +from modelcache_mm.utils.log import modelcache_log +from modelcache_mm.utils.index_util import get_mm_index_name +from modelcache_mm.utils.index_util import get_mm_index_prefix import_redis() @@ -51,13 +51,13 @@ def _check_index_exists(self, index_name: str) -> bool: modelcache_log.info("Index already exists") return True - def create_index(self, index_name, mm_type, index_prefix): + def create_index(self, index_name, type, index_prefix): # dimension = self.dimension - if mm_type == 'IMG_TEXT': + if type == 'IMG_TEXT': dimension = self.mm_dimension - elif mm_type == 'IMG': + elif type == 'IMG': dimension = self.i_dimension - elif mm_type == 'TEXT': + elif type == 'TEXT': dimension = self.t_dimension else: raise ValueError('dimension type exception') @@ -119,6 +119,16 @@ def search(self, data: np.ndarray, top_k: int = -1, model=None, mm_type=None): ) return [(float(result.distance), int(getattr(result, id_field_name))) for result in results] + def create(self, model=None, mm_type=None): + collection_name_model = get_mm_index_name(model, mm_type) + try: + index_prefix = get_mm_index_prefix(model, mm_type) + self.create_index(collection_name_model, mm_type, index_prefix) + except Exception as e: + raise ValueError(str(e)) + return 'success' + + def rebuild(self, ids=None) -> bool: pass @@ -143,10 +153,10 @@ def delete(self, ids) -> None: 
pipe.delete(f"{self.doc_prefix}{data_id}") pipe.execute() - def create(self, model=None, mm_type=None): - index_name = get_mm_index_name(model, mm_type) - index_prefix = get_mm_index_prefix(model, mm_type) - return self.create_index(index_name, mm_type, index_prefix) + def create(self, model=None, type=None): + index_name = get_mm_index_name(model, type) + index_prefix = get_mm_index_prefix(model, type) + return self.create_index(index_name, type, index_prefix) def get_index_by_name(self, index_name): pass diff --git a/modelcache_mm/manager_mm/data_manager_mm.py b/modelcache_mm/manager_mm/data_manager_mm.py deleted file mode 100644 index d6637ea..0000000 --- a/modelcache_mm/manager_mm/data_manager_mm.py +++ /dev/null @@ -1,291 +0,0 @@ -# -*- coding: utf-8 -*- -import logging -import time -import requests -import pickle -import numpy as np -import cachetools -from abc import abstractmethod, ABCMeta -from typing import List, Any, Optional, Union -from modelcache.manager.scalar_data.base import ( - CacheStorage, - CacheData, - DataType, - Answer, - Question -) -from modelcache.utils.error import CacheError, ParamError -from modelcache.manager.vector_data.base import VectorBase, VectorData -from modelcache.manager.object_data.base import ObjectBase -from modelcache.manager.eviction import EvictionBase -from modelcache.manager.eviction_manager import EvictionManager -from modelcache.utils.log import modelcache_log - - -class DataManager(metaclass=ABCMeta): - """DataManager manage the cache data, including save and search""" - - @abstractmethod - def save(self, question, answer, embedding_data, **kwargs): - pass - - @abstractmethod - def save_query_resp(self, query_resp_dict, **kwargs): - pass - - @abstractmethod - def import_data( - self, questions: List[Any], answers: List[Any], embedding_datas: List[Any], model:Any - ): - pass - - @abstractmethod - def get_scalar_data(self, res_data, **kwargs) -> CacheData: - pass - - @abstractmethod - def update_hit_count(self, primary_id, **kwargs): - pass - - def hit_cache_callback(self, res_data, **kwargs): - pass - - @abstractmethod - def search(self, embedding_data, **kwargs): - pass - - @abstractmethod - def delete(self, id_list, **kwargs): - pass - - def truncate(self, model_name): - pass - - def flush(self): - pass - - @abstractmethod - def close(self): - pass - - -class MapDataManager(DataManager): - def __init__(self, data_path, max_size, get_data_container=None): - if get_data_container is None: - self.data = cachetools.LRUCache(max_size) - else: - self.data = get_data_container(max_size) - self.data_path = data_path - self.init() - - def init(self): - try: - with open(self.data_path, "rb") as f: - self.data = pickle.load(f) - except FileNotFoundError: - return - except PermissionError: - raise CacheError( # pylint: disable=W0707 - f"You don't have permission to access this file <{self.data_path}>." 
- ) - - def save(self, question, answer, embedding_data, **kwargs): - if isinstance(question, Question): - question = question.content - self.data[embedding_data] = (question, answer, embedding_data) - - def save_query_resp(self, query_resp_dict, **kwargs): - pass - - def import_data( - self, questions: List[Any], answers: List[Any], embedding_datas: List[Any], model: Any - ): - if len(questions) != len(answers) or len(questions) != len(embedding_datas): - raise ParamError("Make sure that all parameters have the same length") - for i, embedding_data in enumerate(embedding_datas): - self.data[embedding_data] = (questions[i], answers[i], embedding_datas[i]) - - def get_scalar_data(self, res_data, **kwargs) -> CacheData: - return CacheData(question=res_data[0], answers=res_data[1]) - - def update_hit_count(self, primary_id, **kwargs): - pass - - def search(self, embedding_data, **kwargs): - try: - return [self.data[embedding_data]] - except KeyError: - return [] - - def delete(self, id_list, **kwargs): - pass - - def truncate(self, model_name): - pass - - def flush(self): - try: - with open(self.data_path, "wb") as f: - pickle.dump(self.data, f) - except PermissionError: - modelcache_log.error( - "You don't have permission to access this file %s.", self.data_path - ) - - def close(self): - self.flush() - - -def normalize(vec): - magnitude = np.linalg.norm(vec) - normalized_v = vec / magnitude - return normalized_v - - -class SSDataManager(DataManager): - def __init__( - self, - s: CacheStorage, - v: VectorBase, - o: Optional[ObjectBase], - max_size, - clean_size, - policy="LRU", - ): - self.max_size = max_size - self.clean_size = clean_size - self.s = s - self.v = v - self.o = o - - def save(self, question, answer, embedding_data, **kwargs): - model = kwargs.pop("model", None) - self.import_data([question], [answer], [embedding_data], model) - - def save_query_resp(self, query_resp_dict, **kwargs): - save_query_start_time = time.time() - self.s.insert_query_resp(query_resp_dict, **kwargs) - save_query_delta_time = '{}s'.format(round(time.time() - save_query_start_time, 2)) - - def _process_answer_data(self, answers: Union[Answer, List[Answer]]): - if isinstance(answers, Answer): - answers = [answers] - new_ans = [] - for ans in answers: - if ans.answer_type != DataType.STR: - new_ans.append(Answer(self.o.put(ans.answer), ans.answer_type)) - else: - new_ans.append(ans) - return new_ans - - def _process_question_data(self, question: Union[str, Question]): - if isinstance(question, Question): - if question.deps is None: - return question - - for dep in question.deps: - if dep.dep_type == DataType.IMAGE_URL: - dep.dep_type.data = self.o.put(requests.get(dep.data).content) - return question - - return Question(question) - - def import_data( - self, questions: List[Any], answers: List[Answer], embedding_datas: List[Any], model: Any - ): - if len(questions) != len(answers) or len(questions) != len(embedding_datas): - raise ParamError("Make sure that all parameters have the same length") - cache_datas = [] - - embedding_datas = [ - normalize(embedding_data) for embedding_data in embedding_datas - ] - - for i, embedding_data in enumerate(embedding_datas): - if self.o is not None: - ans = self._process_answer_data(answers[i]) - else: - ans = answers[i] - - question = questions[i] - embedding_data = embedding_data.astype("float32") - cache_datas.append([ans, question, embedding_data, model]) - - ids = self.s.batch_insert(cache_datas) - logging.info('ids: {}'.format(ids)) - self.v.mul_add( - [ - 
VectorData(id=ids[i], data=embedding_data) - for i, embedding_data in enumerate(embedding_datas) - ], - model - - ) - - def get_scalar_data(self, res_data, **kwargs) -> Optional[CacheData]: - cache_data = self.s.get_data_by_id(res_data[1]) - if cache_data is None: - return None - return cache_data - - def update_hit_count(self, primary_id, **kwargs): - self.s.update_hit_count_by_id(primary_id) - - def hit_cache_callback(self, res_data, **kwargs): - self.eviction_base.get(res_data[1]) - - def search(self, embedding_data, **kwargs): - model = kwargs.pop("model", None) - embedding_data = normalize(embedding_data) - top_k = kwargs.get("top_k", -1) - return self.v.search(data=embedding_data, top_k=top_k, model=model) - - def delete(self, id_list, **kwargs): - model = kwargs.pop("model", None) - try: - v_delete_count = self.v.delete(ids=id_list, model=model) - except Exception as e: - return {'status': 'failed', 'milvus': 'delete milvus data failed, please check! e: {}'.format(e), - 'mysql': 'unexecuted'} - try: - s_delete_count = self.s.mark_deleted(id_list) - except Exception as e: - return {'status': 'failed', 'milvus': 'success', - 'mysql': 'delete mysql data failed, please check! e: {}'.format(e)} - - return {'status': 'success', 'milvus': 'delete_count: '+str(v_delete_count), - 'mysql': 'delete_count: '+str(s_delete_count)} - - def create_index(self, model, **kwargs): - return self.v.create(model) - - def truncate(self, model_name): - # drop vector base data - try: - vector_resp = self.v.rebuild_col(model_name) - except Exception as e: - return {'status': 'failed', 'VectorDB': 'truncate VectorDB data failed, please check! e: {}'.format(e), - 'ScalarDB': 'unexecuted'} - if vector_resp: - return {'status': 'failed', 'VectorDB': vector_resp, 'ScalarDB': 'unexecuted'} - # drop scalar base data - try: - delete_count = self.s.model_deleted(model_name) - except Exception as e: - return {'status': 'failed', 'VectorDB': 'rebuild', - 'ScalarDB': 'truncate scalar data failed, please check! 
e: {}'.format(e)}
-        return {'status': 'success', 'VectorDB': 'rebuild', 'ScalarDB': 'delete_count: ' + str(delete_count)}
-
-    def flush(self):
-        self.s.flush()
-        self.v.flush()
-
-    def close(self):
-        self.s.close()
-        self.v.close()
-
-
-# if __name__ == '__main__':
-#     from modelcache.manager import CacheBase, VectorBase, get_data_manager
-#     data_manager = get_data_manager(CacheBase('mysql'), VectorBase('milvus', dimension=128))
-#     data_manager.save('hello', 'hi', np.random.random((128,)).astype('float32'), model='gptcode_6b')
diff --git a/modelcache_mm/manager_mm/__init__.py b/modelcache_mm/processor/__init__.py
similarity index 100%
rename from modelcache_mm/manager_mm/__init__.py
rename to modelcache_mm/processor/__init__.py
diff --git a/modelcache_mm/processor/post.py b/modelcache_mm/processor/post.py
new file mode 100644
index 0000000..f92ddc5
--- /dev/null
+++ b/modelcache_mm/processor/post.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+import random
+from typing import List, Any
+
+
+def random_one(messages: List[Any]) -> Any:
+    return random.choice(messages)
+
+
+def first(messages: List[Any]) -> Any:
+    return messages[0]
+
+
+def nop(messages: List[Any]) -> Any:
+    return messages
diff --git a/modelcache_mm/processor/pre.py b/modelcache_mm/processor/pre.py
new file mode 100644
index 0000000..13bc8f4
--- /dev/null
+++ b/modelcache_mm/processor/pre.py
@@ -0,0 +1,121 @@
+# -*- coding: utf-8 -*-
+import re
+from typing import Dict, Any
+
+
+def insert_last_content(data: Dict[str, Any], **_: Dict[str, Any]) -> Any:
+    return data.get("chat_info")[-1]["query"]
+
+
+def query_last_content(data: Dict[str, Any], **_: Dict[str, Any]) -> Any:
+    return data.get("query")[-1]["content"]
+
+
+def last_content_without_prompt(data: Dict[str, Any], **params: Dict[str, Any]) -> Any:
+    last_content_str = data.get("messages")[-1]["content"]
+    prompts = params.get("prompts", [])
+    if prompts is None:
+        return last_content_str
+    pattern = "|".join(prompts)
+    new_content_str = re.sub(pattern, "", last_content_str)
+    return new_content_str
+
+
+def all_content(data: Dict[str, Any], **_: Dict[str, Any]) -> Any:
+    s = ""
+    messages = data.get("messages")
+    for i, message in enumerate(messages):
+        if i == len(messages) - 1:
+            s += message["content"]
+        else:
+            s += message["content"] + "\n"
+    return s
+
+
+def nop(data: Dict[str, Any], **_: Dict[str, Any]) -> Any:
+    return data
+
+
+def get_prompt(data: Dict[str, Any], **_: Dict[str, Any]) -> Any:
+    return data.get("prompt")
+
+
+def get_file_name(data: Dict[str, Any], **_: Dict[str, Any]) -> str:
+    return data.get("file").name
+
+
+def get_file_bytes(data: Dict[str, Any], **_: Dict[str, Any]) -> bytes:
+    return data.get("file").peek()
+
+
+def get_input_str(data: Dict[str, Any], **_: Dict[str, Any]) -> str:
+    input_data = data.get("input")
+    return str(input_data["image"].peek()) + input_data["question"]
+
+
+def get_input_image_file_name(data: Dict[str, Any], **_: Dict[str, Any]) -> str:
+    input_data = data.get("input")
+    return input_data["image"].name
+
+
+def query_multi_splicing(data: Dict[str, Any], **_: Dict[str, Any]) -> Any:
+    query_list = data.get("query")
+    return multi_splicing(query_list)
+
+
+def insert_multi_splicing(data: Dict[str, Any], **_: Dict[str, Any]) -> Any:
+    insert_query_list = data.get("chat_info")[-1]['query']
+    return multi_splicing(insert_query_list)
+
+
+def multi_splicing(data_list) -> Any:
+    result_str = ""
+    for d in data_list:
+        role = d.get('role', '')
+        content = d.get('content', '')
+        result_str += role + "###" + content + "|||"
+
+    # strip the trailing "|||" separator
+    result_str = result_str[:-3]
+
+    return result_str
+
+
+def multi_analysis(dialog_str):
+    sub_strings = dialog_str.split('|||')
+
+    dict_list = []
+    for s in sub_strings:
+        parts = s.split('###')
+
+        if len(parts) == 2:
+            role = parts[0]
+            content = parts[1]
+        elif len(parts) > 2:
+            role = parts[0]
+            content = '###'.join(parts[1:])
+        else:
+            role, content = '', 'exception'
+
+        if content == '':
+            d = {"role": role}
+        else:
+            d = {"role": role, "content": content}
+        dict_list.append(d)
+
+    # 3. collect each dict into a list to form the final result
+    result_list = dict_list
+
+    # return the result
+    return result_list
+
+
+def mm_insert_dict(data: Dict[str, Any], **_: Dict[str, Any]) -> Any:
+    print('chat_info: {}'.format(data.get("chat_info")))
+    query_dict = data.get("chat_info")[-1]['query']
+    return query_dict
+
+
+def mm_query_dict(data: Dict[str, Any], **_: Dict[str, Any]) -> Any:
+    query_dict = data.get("query")
+    return query_dict
diff --git a/modelcache_mm/report.py b/modelcache_mm/report.py
new file mode 100644
index 0000000..5ac4ce8
--- /dev/null
+++ b/modelcache_mm/report.py
@@ -0,0 +1,44 @@
+# -*- coding: utf-8 -*-
+class Report:
+    def __init__(self):
+        self.embedding_all_time = 0
+        self.embedding_count = 0
+        self.search_all_time = 0
+        self.search_count = 0
+        self.hint_cache_count = 0
+
+    def embedding(self, delta_time):
+        """Embedding counts and time.
+
+        :param delta_time: additional runtime.
+        """
+        self.embedding_all_time += delta_time
+        self.embedding_count += 1
+
+    def search(self, delta_time):
+        """Search counts and time.
+
+        :param delta_time: additional runtime.
+        """
+        self.search_all_time += delta_time
+        self.search_count += 1
+
+    def average_embedding_time(self):
+        """Average embedding time."""
+        return round(
+            self.embedding_all_time / self.embedding_count
+            if self.embedding_count != 0
+            else 0,
+            4,
+        )
+
+    def average_search_time(self):
+        return round(
+            self.search_all_time / self.search_count
+            if self.search_count != 0
+            else 0,
+            4,
+        )
+
+    def hint_cache(self):
+        self.hint_cache_count += 1
diff --git a/modelcache_mm/similarity_evaluation/__init__.py b/modelcache_mm/similarity_evaluation/__init__.py
new file mode 100644
index 0000000..22dff8f
--- /dev/null
+++ b/modelcache_mm/similarity_evaluation/__init__.py
@@ -0,0 +1,11 @@
+# -*- coding: utf-8 -*-
+from modelcache.similarity_evaluation.similarity_evaluation import SimilarityEvaluation
+from modelcache.utils.lazy_import import LazyImport
+
+exact_match = LazyImport(
+    "exact_match", globals(), "modelcache.similarity_evaluation.exact_match"
+)
+
+
+def ExactMatchEvaluation():
+    return exact_match.ExactMatchEvaluation()
diff --git a/modelcache_mm/similarity_evaluation/distance.py b/modelcache_mm/similarity_evaluation/distance.py
new file mode 100644
index 0000000..44ca595
--- /dev/null
+++ b/modelcache_mm/similarity_evaluation/distance.py
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+from typing import Tuple, Dict, Any
+from modelcache.similarity_evaluation import SimilarityEvaluation
+
+
+class SearchDistanceEvaluation(SimilarityEvaluation):
+    def __init__(self, max_distance=4.0, positive=False):
+        self.max_distance = max_distance
+        self.positive = positive
+
+    def evaluation(
+        self, src_dict: Dict[str, Any], cache_dict: Dict[str, Any], **_
+    ) -> float:
+        distance, _ = cache_dict["search_result"]
+        if distance < 0:
+            distance = 0
+        elif distance > self.max_distance:
+            distance = self.max_distance
+        if self.positive:
+            return distance
+        return self.max_distance - distance
+
+    def range(self) -> Tuple[float, float]:
+        return 0.0, self.max_distance
diff --git a/modelcache_mm/similarity_evaluation/exact_match.py b/modelcache_mm/similarity_evaluation/exact_match.py
new file mode 100644
index 0000000..553bd59
--- /dev/null
+++ b/modelcache_mm/similarity_evaluation/exact_match.py
@@ -0,0 +1,17 @@
+# -*- coding: utf-8 -*-
+from typing import Tuple, Dict, Any
+from modelcache.similarity_evaluation.similarity_evaluation import SimilarityEvaluation
+
+
+class ExactMatchEvaluation(SimilarityEvaluation):
+
+    def __init__(self):
+        pass
+
+    def evaluation(
+        self, src_dict: Dict[str, Any], cache_dict: Dict[str, Any], **_
+    ) -> float:
+        return 1 if cache_dict["question"] == src_dict["question"] else 0
+
+    def range(self) -> Tuple[float, float]:
+        return 0, 1
diff --git a/modelcache_mm/similarity_evaluation/similarity_evaluation.py b/modelcache_mm/similarity_evaluation/similarity_evaluation.py
new file mode 100644
index 0000000..79f4294
--- /dev/null
+++ b/modelcache_mm/similarity_evaluation/similarity_evaluation.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+from abc import ABCMeta, abstractmethod
+from typing import Tuple, Dict, Any
+
+
+class SimilarityEvaluation(metaclass=ABCMeta):
+    @abstractmethod
+    def evaluation(
+        self, src_dict: Dict[str, Any], cache_dict: Dict[str, Any], **kwargs
+    ) -> float:
+        pass
+
+    @abstractmethod
+    def range(self) -> Tuple[float, float]:
+        pass
diff --git a/modelcache_mm/utils/__init__.py b/modelcache_mm/utils/__init__.py
new file mode 100644
index 0000000..147a56e
--- /dev/null
+++ b/modelcache_mm/utils/__init__.py
@@ -0,0 +1,75 @@
+# -*- coding: utf-8 -*-
+import importlib.util
+from typing import Optional
+from modelcache.utils.dependency_control import prompt_install
+
+
+def _check_library(libname: str, prompt: bool = True, package: Optional[str] = None):
+    is_avail = False
+    if importlib.util.find_spec(libname):
+        is_avail = True
+    if not is_avail and prompt:
+        prompt_install(package if package else libname)
+    return is_avail
+
+
+def import_onnxruntime():
+    _check_library("onnxruntime")
+
+
+def import_huggingface():
+    _check_library("transformers")
+
+
+def import_huggingface_hub():
+    _check_library("huggingface_hub", package="huggingface-hub")
+
+
+def import_pymysql():
+    _check_library("pymysql")
+
+
+def import_sql_client(db_name):
+    if db_name in ["mysql"]:
+        import_pymysql()
+
+
+def import_pymilvus():
+    _check_library("pymilvus")
+
+
+def import_milvus_lite():
+    _check_library("milvus")
+
+
+def import_faiss():
+    _check_library("faiss", package="faiss-cpu")
+
+
+def import_torch():
+    _check_library("torch")
+
+
+def import_fasttext():
+    _check_library("fasttext")
+
+
+def import_paddle():
+    prompt_install("protobuf==3.20.0")
+    _check_library("paddlepaddle")
+
+
+def import_paddlenlp():
+    _check_library("paddlenlp")
+
+
+def import_timm():
+    _check_library("timm", package="timm")
+
+
+def import_pillow():
+    _check_library("PIL", package="pillow")
+
+
+def import_redis():
+    _check_library("redis")
diff --git a/modelcache_mm/utils/cache_func.py b/modelcache_mm/utils/cache_func.py
new file mode 100644
index 0000000..461f542
--- /dev/null
+++ b/modelcache_mm/utils/cache_func.py
@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+def cache_all(*_, **__):
+    return True
\ No newline at end of file
diff --git a/modelcache_mm/utils/dependency_control.py b/modelcache_mm/utils/dependency_control.py
new file mode 100644
index 0000000..0f94355
--- /dev/null
+++ b/modelcache_mm/utils/dependency_control.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+import subprocess
+from modelcache.utils.error import PipInstallError
+from modelcache.utils.log import modelcache_log
+
+
+def prompt_install(package: str, warn: bool = False):  # pragma: no cover
+    """
+    Function used to prompt user to install a package.
+    """
+    cmd = f"pip install {package}"
+    try:
+        if warn and input(f"Install {package}? Y/n: ") != "Y":
+            raise ModuleNotFoundError(f"No module named {package}")
+        subprocess.check_call(cmd, shell=True)
+        modelcache_log.info("%s installed successfully!", package)
+    except subprocess.CalledProcessError as e:
+        raise PipInstallError(package) from e
diff --git a/modelcache_mm/utils/env_config.py b/modelcache_mm/utils/env_config.py
new file mode 100644
index 0000000..40a96af
--- /dev/null
+++ b/modelcache_mm/utils/env_config.py
@@ -0,0 +1 @@
+# -*- coding: utf-8 -*-
diff --git a/modelcache_mm/utils/error.py b/modelcache_mm/utils/error.py
new file mode 100644
index 0000000..9022f00
--- /dev/null
+++ b/modelcache_mm/utils/error.py
@@ -0,0 +1,35 @@
+# -*- coding: utf-8 -*-
+class CacheError(Exception):
+    """ModelCache base error"""
+
+
+class NotInitError(CacheError):
+    """Raise when the cache has been used before it's inited"""
+    def __init__(self):
+        super().__init__("The cache should be inited before using")
+
+
+class RemoveError(CacheError):
+    """Raise when removing cache data fails"""
+    def __init__(self):
+        super().__init__("The cache remove error")
+
+class NotFoundError(CacheError):
+    """Raise when getting an unsupported store."""
+    def __init__(self, store_type, current_type_name):
+        super().__init__(f"Unsupported {store_type}: {current_type_name}")
+
+
+class ParamError(CacheError):
+    """Raise when receiving an invalid param."""
+
+
+class PipInstallError(CacheError):
+    """Raise when failed to install package."""
+    def __init__(self, package):
+        super().__init__(f"Ran into error installing {package}.")
+
+
+class MultiTypeError(CacheError):
+    def __init__(self):
+        super().__init__("multichat type error, please check")
diff --git a/modelcache_mm/utils/index_util.py b/modelcache_mm/utils/index_util.py
new file mode 100644
index 0000000..48c1a8d
--- /dev/null
+++ b/modelcache_mm/utils/index_util.py
@@ -0,0 +1,35 @@
+# -*- coding: utf-8 -*-
+
+
+def get_index_name(model):
+    return 'modelcache' + '_' + model
+
+
+def get_index_prefix(model):
+    return 'prefix' + '_' + model
+
+
+def get_mm_index_name(model, mm_type):
+    print('mm_type: {}'.format(mm_type))
+    if mm_type not in ['IMG_TEXT', 'mm', 'IMG', 'image', 'TEXT', 'text']:
+        raise ValueError('mm_type is not normal!')
+    if mm_type == 'IMG_TEXT':
+        mm_type = 'mm'
+    elif mm_type == 'IMG':
+        mm_type = 'image'
+    elif mm_type == 'TEXT':
+        mm_type = 'text'
+    return 'multicache' + '_' + model + '_' + mm_type
+
+
+def get_mm_index_prefix(model, mm_type):
+    if mm_type not in ['IMG_TEXT', 'mm', 'IMG', 'image', 'TEXT', 'text']:
+        print('mm_type: {}'.format(mm_type))
+        raise ValueError('mm_type is not normal!')
+    if mm_type == 'IMG_TEXT':
+        mm_type = 'mm'
+    elif mm_type == 'IMG':
+        mm_type = 'image'
+    elif mm_type == 'TEXT':
+        mm_type = 'text'
+    return 'prefix' + '_' + model + '_' + mm_type
diff --git a/modelcache_mm/utils/lazy_import.py b/modelcache_mm/utils/lazy_import.py
new file mode 100644
index 0000000..a0f0c6a
--- /dev/null
+++ b/modelcache_mm/utils/lazy_import.py
@@ -0,0 +1,28 @@
+# -*- coding: utf-8 -*-
+import importlib
+from types import ModuleType
+
+
+class LazyImport(ModuleType):
+    """
+    Lazily import a module.
+ """ + + def __init__(self, local_name, parent_module_globals, name): + self._local_name = local_name + self._parent_module_globals = parent_module_globals + super().__init__(name) + + def _load(self): + module = importlib.import_module(self.__name__) + self._parent_module_globals[self._local_name] = module + self.__dict__.update(module.__dict__) + return module + + def __getattr__(self, item): + module = self._load() + return getattr(module, item) + + def __dir__(self): + module = self._load() + return dir(module) diff --git a/modelcache_mm/utils/log.py b/modelcache_mm/utils/log.py new file mode 100644 index 0000000..68f4bd4 --- /dev/null +++ b/modelcache_mm/utils/log.py @@ -0,0 +1,7 @@ +# -*- coding: utf-8 -*- +import logging + +FORMAT = '%(asctime)s - %(thread)d - %(filename)s-%(module)s:%(lineno)s - %(levelname)s: %(message)s' +logging.basicConfig(format=FORMAT) + +modelcache_log = logging.getLogger('modelcache') diff --git a/modelcache_mm/utils/model_filter.py b/modelcache_mm/utils/model_filter.py new file mode 100644 index 0000000..de38882 --- /dev/null +++ b/modelcache_mm/utils/model_filter.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +def model_blacklist_filter(model, request_type): + black_list = ['DI_COPILOT_SECOND', 'DI_COPILOT_LAB', 'DI_COPILOT_THIRD'] + result = None + if model in black_list: + if request_type == 'query': + result = {"errorCode": 105, + "errorDesc": "model: {} in blacklist".format(model), + "cacheHit": False, "delta_time": 0, "hit_query": '', "answer": ''} + elif request_type == 'insert': + result = {"errorCode": 305, "errorDesc": "model: {} in blacklist".format(model), "writeStatus": ""} + + return result + + diff --git a/modelcache_mm/utils/time.py b/modelcache_mm/utils/time.py new file mode 100644 index 0000000..7074b58 --- /dev/null +++ b/modelcache_mm/utils/time.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +import time +from modelcache import cache + + +def time_cal(func, func_name=None, report_func=None): + def inner(*args, **kwargs): + time_start = time.time() + res = func(*args, **kwargs) + delta_time = time.time() - time_start + if cache.config.log_time_func: + cache.config.log_time_func( + func.__name__ if func_name is None else func_name, delta_time + ) + if report_func is not None: + report_func(delta_time) + return res + + return inner + diff --git a/multicache_serving.py b/multicache_serving.py index 096d576..3564a3d 100644 --- a/multicache_serving.py +++ b/multicache_serving.py @@ -17,19 +17,15 @@ import json import uuid import configparser -from modelcache import cache -# from modelcache.adapter import adapter -from modelcache_mm.adapter_mm import adapter -# from modelcache.manager import CacheBase, VectorBase, get_data_manager -from modelcache_mm.manager_mm import CacheBase, VectorBase, get_data_manager -from modelcache.similarity_evaluation.distance import SearchDistanceEvaluation -from modelcache.processor.pre import mm_insert_dict -from modelcache.processor.pre import mm_query_dict from concurrent.futures import ThreadPoolExecutor -from modelcache.maya_embedding_service.maya_multi_embedding_service import get_embedding_multi -from modelcache.maya_embedding_service.maya_multi_embedding_service import get_embedding_multi_concurrent_sin -from modelcache.processor.pre import query_multi_splicing -from modelcache.processor.pre import insert_multi_splicing +from modelcache_mm import cache +from modelcache_mm.adapter import adapter +from modelcache_mm.manager import CacheBase, VectorBase, get_data_manager +from 
modelcache_mm.similarity_evaluation.distance import SearchDistanceEvaluation +from modelcache_mm.processor.pre import mm_insert_dict +from modelcache_mm.processor.pre import mm_query_dict +from modelcache_mm.maya_embedding_service.maya_multi_embedding_service import get_embedding_multi +from modelcache_mm.maya_embedding_service.maya_multi_embedding_service import get_embedding_multi_concurrent_sin def save_query_info(result, model, query, delta_time_log): @@ -59,13 +55,13 @@ def __init__(self): text_dimension = 768 mysql_config = configparser.ConfigParser() - mysql_config.read('modelcache/config/mysql_config.ini') + mysql_config.read('modelcache_mm/config/mysql_config.ini') # milvus_config = configparser.ConfigParser() # milvus_config.read('modelcache/config/milvus_config.ini') redis_config = configparser.ConfigParser() - redis_config.read('modelcache/config/redis_config.ini') + redis_config.read('modelcache_mm/config/redis_config.ini') data_manager = get_data_manager(CacheBase("mysql", config=mysql_config), VectorBase("redis", mm_dimension=image_dimension+text_dimension, @@ -76,16 +72,14 @@ def __init__(self): embedding_concurrent_func=get_embedding_multi_concurrent_sin, data_manager=data_manager, similarity_evaluation=SearchDistanceEvaluation(), - query_pre_embedding_func=query_multi_splicing, - insert_pre_embedding_func=insert_multi_splicing, - mm_insert_pre_embedding_func=mm_insert_dict, - mm_query_pre_embedding_func=mm_query_dict, + insert_pre_embedding_func=mm_insert_dict, + query_pre_embedding_func=mm_query_dict, ) self.gptcache_version = datetime.now().strftime("%Y-%m-%d %H:%M") self.executor = ThreadPoolExecutor(max_workers=6) def __call__(self, param): - print('mm_version: {}'.format(self.gptcache_version)) + print('version: {}'.format(self.gptcache_version)) print('call_time: {}'.format(datetime.now().strftime("%Y-%m-%d %H:%M:%S"))) try: param_dict = json.loads(param) @@ -108,16 +102,16 @@ def __call__(self, param): model = model.replace('.', '_') print('model: {}'.format(model)) - if request_type in ['mm_query', 'mm_insert']: - if request_type == 'mm_query': + if request_type in ['query', 'insert']: + if request_type == 'query': query = param_dict.get("query") - elif request_type == 'mm_insert': + elif request_type == 'insert': chat_info = param_dict.get("chat_info") query = chat_info[-1]['query'] - if request_type is None or request_type not in ['mm_query', 'mm_remove', 'mm_insert', 'mm_register']: + if request_type is None or request_type not in ['query', 'remove', 'insert', 'register']: result = {"errorCode": 102, - "errorDesc": "type exception, should one of ['mm_query', 'mm_insert', 'mm_remove', 'mm_register']", + "errorDesc": "type exception, should one of ['query', 'insert', 'remove', 'register']", "cacheHit": False, "delta_time": 0, "hit_query": '', "answer": ''} cache.data_manager.save_query_resp(result, model=model, query='', delta_time=0) return json.dumps(result) @@ -126,7 +120,7 @@ def __call__(self, param): "answer": ''} return json.dumps(result) - if request_type == 'mm_query': + if request_type == 'query': if UUID: try: uuid_list = UUID.split('==>') @@ -137,7 +131,7 @@ def __call__(self, param): print('uuid_e: {}'.format(e)) try: start_time = time.time() - response = adapter.ChatCompletion.create_mm_query( + response = adapter.ChatCompletion.create_query( scope={"model": model}, query=query, ) @@ -162,12 +156,13 @@ def __call__(self, param): query_time = round(time.time() - start_time, 2) print('query_time: {}'.format(query_time)) except Exception as e: - result 
= {"errorCode": 202, "errorDesc": str(e), "cacheHit": False, "delta_time": 0, - "hit_query": '', "answer": ''} + # result = {"errorCode": 202, "errorDesc": str(e), "cacheHit": False, "delta_time": 0, + # "hit_query": '', "answer": ''} + raise e print('result: {}'.format(result)) return json.dumps(result, ensure_ascii=False) - if request_type == 'mm_insert': + if request_type == 'insert': if UUID: try: uuid_list = UUID.split('==>') @@ -179,7 +174,7 @@ def __call__(self, param): try: start_time = time.time() try: - response = adapter.ChatCompletion.create_mm_insert( + response = adapter.ChatCompletion.create_insert( model=model, chat_info=chat_info, ) @@ -201,12 +196,12 @@ def __call__(self, param): # return json.dumps(result, ensure_ascii=False) raise e - if request_type == 'mm_remove': + if request_type == 'remove': remove_type = param_dict.get("remove_type") id_list = param_dict.get("id_list", []) print('remove_type: {}'.format(remove_type)) - response = adapter.ChatCompletion.create_mm_remove( + response = adapter.ChatCompletion.create_remove( model=model, remove_type=remove_type, id_list=id_list @@ -224,11 +219,11 @@ def __call__(self, param): result = {"errorCode": 402, "errorDesc": "", "response": response, "writeStatus": "exception"} return json.dumps(result) - if request_type == 'mm_register': - mm_type = param_dict.get("mm_type") - response = adapter.ChatCompletion.create_mm_register( + if request_type == 'register': + type = param_dict.get("type") + response = adapter.ChatCompletion.create_register( model=model, - mm_type=mm_type + type=type ) if response in ['create_success', 'already_exists']: result = {"errorCode": 0, "errorDesc": "", "response": response, "writeStatus": "success"} @@ -252,23 +247,23 @@ def __health_check__(self): if __name__ == '__main__': # ============01 - request_type = 'mm_insert' - scope = {"model": "test_0313"} - # UUID = "820b0052-d9d8-11ee-95f1-52775e3e6fd1" + "==>" + str(time.time()) - UUID = str(uuid.uuid1()) + "==>" + str(time.time()) - print('UUID: {}'.format(UUID)) - img_data = "http://resarch.oss-cn-hangzhou-zmf.aliyuncs.com/transFile%2Ftmp%2FLMM_test_image_coco%2FCOCO_train2014_000000332345.jpg" - query = {'text': ['父母带着孩子来这个地方可能会有什么顾虑'], - 'imageRaw': '', - 'imageUrl': img_data, - 'imageId': 'ccc'} - answer = "应该注意小孩不要跑到铁轨上" - chat_info = [{"query": query, "answer": answer}] - data_dict = {'request_type': request_type, 'scope': scope, 'chat_info': chat_info, 'UUID': UUID} - r1 = json.dumps(data_dict) + # request_type = 'insert' + # scope = {"model": "test_0313"} + # # UUID = "820b0052-d9d8-11ee-95f1-52775e3e6fd1" + "==>" + str(time.time()) + # UUID = str(uuid.uuid1()) + "==>" + str(time.time()) + # print('UUID: {}'.format(UUID)) + # img_data = "http://resarch.oss-cn-hangzhou-zmf.aliyuncs.com/transFile%2Ftmp%2FLMM_test_image_coco%2FCOCO_train2014_000000332345.jpg" + # query = {'text': ['父母带着孩子来这个地方可能会有什么顾虑'], + # 'imageRaw': '', + # 'imageUrl': img_data, + # 'imageId': 'ccc'} + # answer = "应该注意小孩不要跑到铁轨上" + # chat_info = [{"query": query, "answer": answer}] + # data_dict = {'request_type': request_type, 'scope': scope, 'chat_info': chat_info, 'UUID': UUID} + # r1 = json.dumps(data_dict) # ============02 - # request_type = 'mm_query' + # request_type = 'query' # UUID = str(uuid.uuid1()) + "==>" + str(time.time()) # scope = {"model": "test_0313"} # img_data = 'http://resarch.oss-cn-hangzhou-zmf.aliyuncs.com/transFile%2Ftmp%2FLMM_test_image_coco%2FCOCO_train2014_000000332345.jpg' @@ -279,17 +274,16 @@ def __health_check__(self): # r1 = 
json.dumps({'request_type': request_type, 'scope': scope, 'query': query, 'UUID': UUID}) # ============03 - # request_type = 'mm_remove' - # scope = {"model": "test_0313"} - # # mm_type = 'IMG_TEXT' - # remove_type = 'truncate_by_model' - # r1 = json.dumps({'request_type': request_type, 'scope': scope, 'remove_type': remove_type}) + request_type = 'remove' + scope = {"model": "test_0313"} + remove_type = 'truncate_by_model' + r1 = json.dumps({'request_type': request_type, 'scope': scope, 'remove_type': remove_type}) # ============04 - # request_type = 'mm_register' + # request_type = 'register' # scope = {"model": "test_0313"} - # mm_type = 'IMG_TEXT' - # r1 = json.dumps({'request_type': request_type, 'scope': scope, 'mm_type': mm_type}) + # type = 'IMG_TEXT' + # r1 = json.dumps({'request_type': request_type, 'scope': scope, 'type': type}) user_backend = UserBackend() resp = user_backend(r1) From 21aaf6f6c04b45113b0d01a4d79b78342ff9582d Mon Sep 17 00:00:00 2001 From: fuhui Date: Thu, 18 Apr 2024 14:02:36 +0800 Subject: [PATCH 30/98] delete file --- .gitignore | 5 +- modelcache/core.py | 6 - modelcache/manager/scalar_data/base.py | 4 - modelcache/manager/scalar_data/sql_storage.py | 1 - modelcache/processor/pre.py | 11 - modelcache/utils/index_util.py | 46 ++--- modelcache_mm/adapter/adapter_remove.py | 2 +- modelcache_mm/embedding/data2vec.py | 108 ---------- modelcache_mm/embedding/fasttext.py | 27 --- modelcache_mm/embedding/huggingface.py | 64 ------ modelcache_mm/embedding/llmEmb.py | 38 ---- modelcache_mm/embedding/onnx.py | 70 ------- modelcache_mm/embedding/paddlenlp.py | 60 ------ modelcache_mm/manager/data_manager.py | 32 ++- modelcache_mm/manager/eviction/manager.py | 25 --- .../manager/eviction/memory_cache.py | 44 ---- .../manager/scalar_data/sql_storage.py | 10 +- modelcache_mm/manager/vector_data/faiss.py | 53 ----- modelcache_mm/manager/vector_data/milvus.py | 189 ------------------ modelcache_mm/manager/vector_data/redis.py | 1 - multicache_serving.py | 8 +- 21 files changed, 62 insertions(+), 742 deletions(-) delete mode 100644 modelcache_mm/embedding/data2vec.py delete mode 100644 modelcache_mm/embedding/fasttext.py delete mode 100644 modelcache_mm/embedding/huggingface.py delete mode 100644 modelcache_mm/embedding/llmEmb.py delete mode 100644 modelcache_mm/embedding/onnx.py delete mode 100644 modelcache_mm/embedding/paddlenlp.py delete mode 100644 modelcache_mm/manager/eviction/manager.py delete mode 100644 modelcache_mm/manager/eviction/memory_cache.py delete mode 100644 modelcache_mm/manager/vector_data/faiss.py delete mode 100644 modelcache_mm/manager/vector_data/milvus.py diff --git a/.gitignore b/.gitignore index ac8448b..94daa29 100644 --- a/.gitignore +++ b/.gitignore @@ -135,7 +135,8 @@ dmypy.json /embedding_npy /flask_server *.bin -**/modelcache_serving.py **/maya_embedding_service -*.ini \ No newline at end of file +*.ini + +multicache_serving.py \ No newline at end of file diff --git a/modelcache/core.py b/modelcache/core.py index 8cb1745..f5484e3 100644 --- a/modelcache/core.py +++ b/modelcache/core.py @@ -35,10 +35,7 @@ def init( cache_enable_func=cache_all, query_pre_embedding_func=None, insert_pre_embedding_func=None, - mm_query_pre_embedding_func=None, - mm_insert_pre_embedding_func=None, embedding_func=string_embedding, - embedding_concurrent_func=string_embedding, data_manager: DataManager = get_data_manager(), similarity_evaluation=ExactMatchEvaluation(), post_process_messages_func=first, @@ -49,10 +46,7 @@ def init( self.cache_enable_func = 
cache_enable_func self.query_pre_embedding_func = query_pre_embedding_func self.insert_pre_embedding_func = insert_pre_embedding_func - self.mm_query_pre_embedding_func = mm_query_pre_embedding_func - self.mm_insert_pre_embedding_func = mm_insert_pre_embedding_func self.embedding_func = embedding_func - self.embedding_concurrent_func = embedding_concurrent_func self.data_manager: DataManager = data_manager self.similarity_evaluation = similarity_evaluation self.post_process_messages_func = post_process_messages_func diff --git a/modelcache/manager/scalar_data/base.py b/modelcache/manager/scalar_data/base.py index f080491..6db058d 100644 --- a/modelcache/manager/scalar_data/base.py +++ b/modelcache/manager/scalar_data/base.py @@ -94,10 +94,6 @@ class CacheStorage(metaclass=ABCMeta): def create(self): pass - @abstractmethod - def batch_iat_insert(self, all_data: List[CacheData]): - pass - @abstractmethod def insert_query_resp(self, query_resp, **kwargs): pass diff --git a/modelcache/manager/scalar_data/sql_storage.py b/modelcache/manager/scalar_data/sql_storage.py index e44b069..23361d5 100644 --- a/modelcache/manager/scalar_data/sql_storage.py +++ b/modelcache/manager/scalar_data/sql_storage.py @@ -40,7 +40,6 @@ def _insert(self, data: List): embedding_data = data[2] model = data[3] answer_type = 0 - print('embedding_data: {}'.format(embedding_data)) embedding_data = embedding_data.tobytes() table_name = "cache_codegpt_answer" diff --git a/modelcache/processor/pre.py b/modelcache/processor/pre.py index 13bc8f4..5875294 100644 --- a/modelcache/processor/pre.py +++ b/modelcache/processor/pre.py @@ -108,14 +108,3 @@ def multi_analysis(dialog_str): # 输出结果 return result_list - - -def mm_insert_dict(data: Dict[str, Any], **_: Dict[str, Any]) -> Any: - print('chat_info: {}'.format(data.get("chat_info"))) - query_dict = data.get("chat_info")[-1]['query'] - return query_dict - - -def mm_query_dict(data: Dict[str, Any], **_: Dict[str, Any]) -> Any: - query_dict = data.get("query") - return query_dict diff --git a/modelcache/utils/index_util.py b/modelcache/utils/index_util.py index b78d883..2a1cfca 100644 --- a/modelcache/utils/index_util.py +++ b/modelcache/utils/index_util.py @@ -9,26 +9,26 @@ def get_index_prefix(model): return 'prefix' + '_' + model -def get_mm_index_name(model, mm_type): - print('mm_type: {}'.format(mm_type)) - if mm_type not in ['IMG_TEXT', 'mm', 'IMG', 'image', 'TEXT', 'text']: - raise ValueError('mm_type is not normal!') - if mm_type == 'IMG_TEXT': - mm_type = 'mm' - elif mm_type == 'IMG': - mm_type = 'image' - elif mm_type == 'TEXT': - mm_type = 'text' - return 'multicache' + '_' + model + '_' + mm_type - - -def get_mm_index_prefix(model, mm_type): - if mm_type not in ['IMG_TEXT', 'mm', 'IMG', 'image', 'TEXT', 'text']: - raise ValueError('iat_type is not normal!') - if mm_type == 'IMG_TEXT': - mm_type = 'mm' - elif mm_type == 'IMG': - mm_type = 'image' - elif mm_type == 'TEXT': - mm_type = 'text' - return 'prefix' + '_' + model + '_' + mm_type +# def get_mm_index_name(model, mm_type): +# print('mm_type: {}'.format(mm_type)) +# if mm_type not in ['IMG_TEXT', 'mm', 'IMG', 'image', 'TEXT', 'text']: +# raise ValueError('mm_type is not normal!') +# if mm_type == 'IMG_TEXT': +# mm_type = 'mm' +# elif mm_type == 'IMG': +# mm_type = 'image' +# elif mm_type == 'TEXT': +# mm_type = 'text' +# return 'multicache' + '_' + model + '_' + mm_type +# +# +# def get_mm_index_prefix(model, mm_type): +# if mm_type not in ['IMG_TEXT', 'mm', 'IMG', 'image', 'TEXT', 'text']: +# raise 
ValueError('iat_type is not normal!') +# if mm_type == 'IMG_TEXT': +# mm_type = 'mm' +# elif mm_type == 'IMG': +# mm_type = 'image' +# elif mm_type == 'TEXT': +# mm_type = 'text' +# return 'prefix' + '_' + model + '_' + mm_type diff --git a/modelcache_mm/adapter/adapter_remove.py b/modelcache_mm/adapter/adapter_remove.py index f6e26fa..746a4ab 100644 --- a/modelcache_mm/adapter/adapter_remove.py +++ b/modelcache_mm/adapter/adapter_remove.py @@ -19,7 +19,7 @@ def adapt_remove(*args, **kwargs): print('id_list: {}'.format(id_list)) resp = chat_cache.data_manager.delete(id_list, model=model) elif remove_type == 'truncate_by_model': - resp = chat_cache.data_manager.truncate_iat(model) + resp = chat_cache.data_manager.truncate(model) else: resp = "remove_type_error" return resp diff --git a/modelcache_mm/embedding/data2vec.py b/modelcache_mm/embedding/data2vec.py deleted file mode 100644 index 274eb5a..0000000 --- a/modelcache_mm/embedding/data2vec.py +++ /dev/null @@ -1,108 +0,0 @@ -# -*- coding: utf-8 -*- -import os -import time -import numpy as np -import torch -from transformers import BertTokenizer, BertModel -from modelcache.embedding.base import BaseEmbedding - - -def mean_pooling(model_output, attention_mask): - token_embeddings = model_output[0] # First element of model_output contains all token embeddings - input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() - return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) - - -class Data2VecAudio(BaseEmbedding): - def __init__(self, model: str = "sentence-transformers/all-MiniLM-L6-v2"): - current_dir = os.path.dirname(os.path.abspath(__file__)) - parent_dir = os.path.dirname(current_dir) - model_dir = os.path.dirname(parent_dir) - model = os.path.join(model_dir, 'model/text2vec-base-chinese/') - - try: - self.__dimension = self.model.config.hidden_size - except Exception: - from transformers import AutoConfig - - config = AutoConfig.from_pretrained(model) - self.__dimension = config.hidden_size - - self.device = 'cuda' if torch.cuda.is_available() else 'cpu' - self.tokenizer = BertTokenizer.from_pretrained(model, local_files_only=True) - self.model = BertModel.from_pretrained(model, local_files_only=True) - - def to_embeddings(self, data, **_): - encoded_input = self.tokenizer(data, padding=True, truncation=True, return_tensors='pt') - num_tokens = sum(map(len, encoded_input['input_ids'])) - - if num_tokens <= 512: - with torch.no_grad(): - encoded_input = {k: v.to(self.device) for k, v in encoded_input.items()} - model_output = self.model(**encoded_input) - sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) - sentence_embeddings = sentence_embeddings.squeeze(0).detach().cpu().numpy() - embedding_array = np.array(sentence_embeddings).astype("float32") - return embedding_array - else: - window_size = 510 - start = 0 - input_ids = encoded_input['input_ids'] - input_ids = input_ids[:, 1:-1] - start_token = self.tokenizer.cls_token - end_token = self.tokenizer.sep_token - start_token_id = self.tokenizer.convert_tokens_to_ids(start_token) - end_token_id = self.tokenizer.convert_tokens_to_ids(end_token) - begin_element = torch.tensor([[start_token_id]]) - end_element = torch.tensor([[end_token_id]]) - - embedding_array_list = list() - while start < num_tokens: - # Calculate the ending position of the sliding window. - end = start + window_size - # If the ending position exceeds the length, adjust it to the length. 
- if end > num_tokens: - end = num_tokens - # Retrieve the data within the sliding window. - input_ids_window = input_ids[:, start:end] - # Insert a new element at position 0. - input_ids_window = torch.cat([begin_element, input_ids_window[:, 0:]], dim=1) - # Insert a new element at the last position. - input_ids_window = torch.cat([input_ids_window, end_element], dim=1) - input_ids_window_length = sum(map(len, input_ids_window)) - token_type_ids = torch.tensor([[0] * input_ids_window_length]) - attention_mask = torch.tensor([[1] * input_ids_window_length]) - - # Concatenate new input_ids - encoded_input_window = {'input_ids': input_ids_window, 'token_type_ids': token_type_ids, - 'attention_mask': attention_mask} - with torch.no_grad(): - encoded_input_window = {k: v.to(self.device) for k, v in encoded_input_window.items()} - model_output_window = self.model(**encoded_input_window) - - sentence_embeddings_window = mean_pooling(model_output_window, encoded_input_window['attention_mask']) - sentence_embeddings_window = sentence_embeddings_window.squeeze(0).detach().cpu().numpy() - embedding_array_window = np.array(sentence_embeddings_window).astype("float32") - embedding_array_list.append(embedding_array_window) - start = end - - embedding_array = np.mean(embedding_array_list, axis=0) - return embedding_array - - def post_proc(self, token_embeddings, inputs): - attention_mask = inputs["attention_mask"] - input_mask_expanded = ( - attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() - ) - sentence_embs = torch.sum( - token_embeddings * input_mask_expanded, 1 - ) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) - return sentence_embs - - @property - def dimension(self): - """Embedding dimension. - - :return: embedding dimension - """ - return self.__dimension diff --git a/modelcache_mm/embedding/fasttext.py b/modelcache_mm/embedding/fasttext.py deleted file mode 100644 index adbc38c..0000000 --- a/modelcache_mm/embedding/fasttext.py +++ /dev/null @@ -1,27 +0,0 @@ -# -*- coding: utf-8 -*- -import numpy as np -import os -from modelcache.utils import import_fasttext -from modelcache.embedding.base import BaseEmbedding -import_fasttext() -import fasttext.util - - -class FastText(BaseEmbedding): - def __init__(self, model: str = "en", dim: int = None): - self.model_path = os.path.abspath(fasttext.util.download_model(model)) - self.ft = fasttext.load_model(self.model_path) - - if dim: - fasttext.util.reduce_model(self.ft, dim) - self.__dimension = self.ft.get_dimension() - - def to_embeddings(self, data, **_): - assert isinstance(data, str), "Only allow string as input." 
- emb = self.ft.get_sentence_vector(data) - return np.array(emb).astype("float32") - - @property - def dimension(self): - return self.__dimension - diff --git a/modelcache_mm/embedding/huggingface.py b/modelcache_mm/embedding/huggingface.py deleted file mode 100644 index 8c1434d..0000000 --- a/modelcache_mm/embedding/huggingface.py +++ /dev/null @@ -1,64 +0,0 @@ -# -*- coding: utf-8 -*- -import numpy as np - -from modelcache.utils import import_huggingface, import_torch -from modelcache.embedding.base import BaseEmbedding - -import_torch() -import_huggingface() - -import torch # pylint: disable=C0413 -from transformers import AutoTokenizer, AutoModel # pylint: disable=C0413 - - -class Huggingface(BaseEmbedding): - def __init__(self, model: str = "sentence-transformers/all-MiniLM-L6-v2"): - self.model = AutoModel.from_pretrained(model, local_files_only=True) - self.model.eval() - - # self.tokenizer = AutoTokenizer.from_pretrained(model) - self.tokenizer = AutoTokenizer.from_pretrained(model, local_files_only=True) - if not self.tokenizer.pad_token: - self.tokenizer.pad_token = "[PAD]" - try: - self.__dimension = self.model.config.hidden_size - except Exception: # pylint: disable=W0703 - from transformers import AutoConfig # pylint: disable=C0415 - - config = AutoConfig.from_pretrained(model) - self.__dimension = config.hidden_size - - def to_embeddings(self, data, **_): - """Generate embedding given text input - - :param data: text in string. - :type data: str - - :return: a text embedding in shape of (dim,). - """ - if not isinstance(data, list): - data = [data] - inputs = self.tokenizer( - data, padding=True, truncation=True, return_tensors="pt" - ) - outs = self.model(**inputs).last_hidden_state - emb = self.post_proc(outs, inputs).squeeze(0).detach().numpy() - return np.array(emb).astype("float32") - - def post_proc(self, token_embeddings, inputs): - attention_mask = inputs["attention_mask"] - input_mask_expanded = ( - attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() - ) - sentence_embs = torch.sum( - token_embeddings * input_mask_expanded, 1 - ) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) - return sentence_embs - - @property - def dimension(self): - """Embedding dimension. - - :return: embedding dimension - """ - return self.__dimension diff --git a/modelcache_mm/embedding/llmEmb.py b/modelcache_mm/embedding/llmEmb.py deleted file mode 100644 index 096552c..0000000 --- a/modelcache_mm/embedding/llmEmb.py +++ /dev/null @@ -1,38 +0,0 @@ -# -*- coding: utf-8 -*- -import numpy as np -from modelcache.embedding.base import BaseEmbedding -from transformers import AutoTokenizer -from transformers import AutoConfig - - -class LlmEmb2Vec(BaseEmbedding): - def __init__(self): - - self.model_name = '' # 13b-mft-embedding.npy - model_path = '' # .npy file storage path - model_file = model_path + self.model_name # .npy file - config = AutoConfig.from_pretrained(model_path) - dimension = config.hidden_size - self.__dimension = dimension - self.model = np.load(model_file) - self.tokenizer = AutoTokenizer.from_pretrained(model_path, local_files_only=True) - - def to_embeddings(self, data, **_): - """Generate embedding given text input - - :param data: text in string. - :return: a text embedding in shape of (dim,). 
- """ - input_ids = self.tokenizer.encode(data, add_special_tokens=True) - embedding_array = self.model[input_ids].mean(axis=0) - return embedding_array - - def post_proc(self, token_embeddings, inputs): - pass - - @property - def dimension(self): - """Embedding dimension. - :return: embedding dimension - """ - return self.__dimension diff --git a/modelcache_mm/embedding/onnx.py b/modelcache_mm/embedding/onnx.py deleted file mode 100644 index 9df64ff..0000000 --- a/modelcache_mm/embedding/onnx.py +++ /dev/null @@ -1,70 +0,0 @@ -# -*- coding: utf-8 -*- -import numpy as np - -from modelcache.embedding.base import BaseEmbedding -from modelcache.utils import ( - import_onnxruntime, - import_huggingface_hub, - import_huggingface, -) - -import_huggingface() -import_onnxruntime() -import_huggingface_hub() - -from transformers import AutoTokenizer, AutoConfig # pylint: disable=C0413 -import onnxruntime -from modelcache.utils.env_config import get_onnx_tokenizer_path, get_onnx_model - - -class Onnx(BaseEmbedding): - - def __init__(self, model="modelcache_open/paraphrase-albert-onnx"): - # 本地加载 - onnx_tokenizer = get_onnx_tokenizer_path() - self.tokenizer = AutoTokenizer.from_pretrained(onnx_tokenizer, local_files_only=True) - # 本地加载 - onnx_model = get_onnx_model() - self.ort_session = onnxruntime.InferenceSession(onnx_model) - - config = AutoConfig.from_pretrained(onnx_tokenizer, local_files_only=True) - self.__dimension = config.hidden_size - - def to_embeddings(self, data, **_): - """Generate embedding given text input. - - :param data: text in string. - :type data: str - - :return: a text embedding in shape of (dim,). - """ - encoded_text = self.tokenizer.encode_plus(data, padding="max_length") - ort_inputs = { - "input_ids": np.array(encoded_text["input_ids"]).reshape(1, -1), - "attention_mask": np.array(encoded_text["attention_mask"]).reshape(1, -1), - "token_type_ids": np.array(encoded_text["token_type_ids"]).reshape(1, -1), - } - - ort_outputs = self.ort_session.run(None, ort_inputs) - ort_feat = ort_outputs[0] - emb = self.post_proc(ort_feat, ort_inputs["attention_mask"]) - return emb.flatten() - - def post_proc(self, token_embeddings, attention_mask): - input_mask_expanded = ( - np.expand_dims(attention_mask, -1) - .repeat(token_embeddings.shape[-1], -1) - .astype(float) - ) - sentence_embs = np.sum(token_embeddings * input_mask_expanded, 1) / np.maximum( - input_mask_expanded.sum(1), 1e-9 - ) - return sentence_embs - - @property - def dimension(self): - """Embedding dimension. 
- - :return: embedding dimension - """ - return self.__dimension diff --git a/modelcache_mm/embedding/paddlenlp.py b/modelcache_mm/embedding/paddlenlp.py deleted file mode 100644 index 4b6ccbd..0000000 --- a/modelcache_mm/embedding/paddlenlp.py +++ /dev/null @@ -1,60 +0,0 @@ -# -*- coding: utf-8 -*- -import numpy as np - -from modelcache.embedding.base import BaseEmbedding -from modelcache.utils import import_paddlenlp, import_paddle - -import_paddle() -import_paddlenlp() - - -import paddle # pylint: disable=C0413 -from paddlenlp.transformers import AutoModel, AutoTokenizer # pylint: disable=C0413 - - -class PaddleNLP(BaseEmbedding): - def __init__(self, model: str = "ernie-3.0-medium-zh"): - self.model = AutoModel.from_pretrained(model) - self.model.eval() - - self.tokenizer = AutoTokenizer.from_pretrained(model) - if not self.tokenizer.pad_token: - self.tokenizer.pad_token = "" - self.__dimension = None - - def to_embeddings(self, data, **_): - """Generate embedding given text input - - :param data: text in string. - :type data: str - - :return: a text embedding in shape of (dim,). - """ - if not isinstance(data, list): - data = [data] - inputs = self.tokenizer( - data, padding=True, truncation=True, return_tensors="pd" - ) - outs = self.model(**inputs)[0] - emb = self.post_proc(outs, inputs).squeeze(0).detach().numpy() - return np.array(emb).astype("float32") - - def post_proc(self, token_embeddings, inputs): - attention_mask = paddle.ones(inputs["token_type_ids"].shape) - input_mask_expanded = ( - attention_mask.unsqueeze(-1).expand(token_embeddings.shape).astype("float32") - ) - sentence_embs = paddle.sum( - token_embeddings * input_mask_expanded, 1 - ) / paddle.clip(input_mask_expanded.sum(1), min=1e-9) - return sentence_embs - - @property - def dimension(self): - """Embedding dimension. - - :return: embedding dimension - """ - if not self.__dimension: - self.__dimension = len(self.to_embeddings("foo")) - return self.__dimension diff --git a/modelcache_mm/manager/data_manager.py b/modelcache_mm/manager/data_manager.py index 503dadb..c6dae19 100644 --- a/modelcache_mm/manager/data_manager.py +++ b/modelcache_mm/manager/data_manager.py @@ -328,20 +328,38 @@ def create_index(self, model, type, **kwargs): return self.v.create(model, type) def truncate(self, model_name): - # drop vector base data + # # drop vector base data + # try: + # vector_resp = self.v.rebuild_col(model_name) + # except Exception as e: + # return {'status': 'failed', 'VectorDB': 'truncate VectorDB data failed, please check! e: {}'.format(e), + # 'ScalarDB': 'unexecuted'} + # if vector_resp: + # return {'status': 'failed', 'VectorDB': vector_resp, 'ScalarDB': 'unexecuted'} + # # drop scalar base data + # try: + # delete_count = self.s.model_deleted(model_name) + # except Exception as e: + # return {'status': 'failed', 'VectorDB': 'rebuild', + # 'ScalarDB': 'truncate scalar data failed, please check! e: {}'.format(e)} + # return {'status': 'success', 'VectorDB': 'rebuild', 'ScalarDB': 'delete_count: ' + str(delete_count)} try: - vector_resp = self.v.rebuild_col(model_name) + resp = self.v.rebuild_idx(model_name) except Exception as e: - return {'status': 'failed', 'VectorDB': 'truncate VectorDB data failed, please check! e: {}'.format(e), + return {'status': 'failed', 'VectorDB': 'truncate VectorDB failed, please check! 
e: {}'.format(str(e)), 'ScalarDB': 'unexecuted'} - if vector_resp: - return {'status': 'failed', 'VectorDB': vector_resp, 'ScalarDB': 'unexecuted'} - # drop scalar base data + + if resp: + print('resp: {}'.format(resp)) + return {'status': 'failed', 'VectorDB': resp, 'ScalarDB': 'unexecuted'} + # drop ocean base model try: delete_count = self.s.model_deleted(model_name) except Exception as e: + # return 'truncate milvus data failed, please check!' return {'status': 'failed', 'VectorDB': 'rebuild', - 'ScalarDB': 'truncate scalar data failed, please check! e: {}'.format(e)} + 'ScalarDB': 'truncate scalardb data failed, please check! e: {}'.format(e)} + return {'status': 'success', 'VectorDB': 'rebuild', 'ScalarDB': 'delete_count: ' + str(delete_count)} def flush(self): diff --git a/modelcache_mm/manager/eviction/manager.py b/modelcache_mm/manager/eviction/manager.py deleted file mode 100644 index 61579f0..0000000 --- a/modelcache_mm/manager/eviction/manager.py +++ /dev/null @@ -1,25 +0,0 @@ -# -*- coding: utf-8 -*- -from typing import Callable, List, Any -from modelcache.utils.error import NotFoundError - - -class EvictionBase: - """ - EvictionBase to evict the cache data. - """ - - def __init__(self): - raise EnvironmentError( - "EvictionBase is designed to be instantiated, " - "please using the `EvictionBase.get(name, policy, maxsize, clean_size)`." - ) - - @staticmethod - def get(name: str, policy: str, maxsize: int, clean_size: int, on_evict: Callable[[List[Any]], None], **kwargs): - if name in "memory": - from modelcache.manager.eviction.memory_cache import MemoryCacheEviction - - eviction_base = MemoryCacheEviction(policy, maxsize, clean_size, on_evict, **kwargs) - else: - raise NotFoundError("eviction base", name) - return eviction_base diff --git a/modelcache_mm/manager/eviction/memory_cache.py b/modelcache_mm/manager/eviction/memory_cache.py deleted file mode 100644 index 3cb487f..0000000 --- a/modelcache_mm/manager/eviction/memory_cache.py +++ /dev/null @@ -1,44 +0,0 @@ -# -*- coding: utf-8 -*- -from typing import Any, Callable, List -import cachetools - -from modelcache.manager.eviction.base import EvictionBase - - -def popitem_wrapper(func, wrapper_func, clean_size): - def wrapper(*args, **kwargs): - keys = [] - try: - keys = [func(*args, **kwargs)[0] for _ in range(clean_size)] - except KeyError: - pass - wrapper_func(keys) - return wrapper - - -class MemoryCacheEviction(EvictionBase): - def __init__(self, policy: str, maxsize: int, clean_size: int, on_evict: Callable[[List[Any]], None], **kwargs): - self._policy = policy.upper() - if self._policy == "LRU": - self._cache = cachetools.LRUCache(maxsize=maxsize, **kwargs) - elif self._policy == "LFU": - self._cache = cachetools.LFUCache(maxsize=maxsize, **kwargs) - elif self._policy == "FIFO": - self._cache = cachetools.FIFOCache(maxsize=maxsize, **kwargs) - elif self._policy == "RR": - self._cache = cachetools.RRCache(maxsize=maxsize, **kwargs) - else: - raise ValueError(f"Unknown policy {policy}") - - self._cache.popitem = popitem_wrapper(self._cache.popitem, on_evict, clean_size) - - def put(self, objs: List[Any]): - for obj in objs: - self._cache[obj] = True - - def get(self, obj: Any): - return self._cache.get(obj) - - @property - def policy(self) -> str: - return self._policy diff --git a/modelcache_mm/manager/scalar_data/sql_storage.py b/modelcache_mm/manager/scalar_data/sql_storage.py index 8ddd5ff..fe7c7f1 100644 --- a/modelcache_mm/manager/scalar_data/sql_storage.py +++ b/modelcache_mm/manager/scalar_data/sql_storage.py 
@@ -101,7 +101,7 @@ def insert_query_resp(self, query_resp, **kwargs): if isinstance(hit_query, list): hit_query = json.dumps(hit_query, ensure_ascii=False) - table_name = "modelcache_query_log" + table_name = "open_cache_mm_query_log" insert_sql = "INSERT INTO {} (error_code, error_desc, cache_hit, model, query, delta_time, hit_query, answer) VALUES (%s, %s, %s, %s, %s, %s, %s, %s)".format(table_name) conn = self.pool.connection() try: @@ -156,7 +156,7 @@ def get_data_by_id(self, key: int): return None def update_hit_count_by_id(self, primary_id: int): - table_name = "cache_codegpt_answer" + table_name = "open_cache_mm_answer" update_sql = "UPDATE {} SET hit_count = hit_count+1 WHERE id={}".format(table_name, primary_id) conn = self.pool.connection() @@ -174,7 +174,7 @@ def get_ids(self, deleted=True): pass def mark_deleted(self, keys): - table_name = "cache_codegpt_answer" + table_name = "open_cache_mm_answer" delete_sql = "Delete from {} WHERE id in ({})".format(table_name, ",".join([str(i) for i in keys])) # 从连接池中获取连接 @@ -191,8 +191,10 @@ def mark_deleted(self, keys): return delete_count def model_deleted(self, model_name): - table_name = "cache_codegpt_answer" + table_name = "open_cache_mm_answer" delete_sql = "Delete from {} WHERE model='{}'".format(table_name, model_name) + # print('delete_sql: {}'.format(delete_sql)) + # print('delete_sql begin') conn = self.pool.connection() # 使用连接执行删除数据操作 try: diff --git a/modelcache_mm/manager/vector_data/faiss.py b/modelcache_mm/manager/vector_data/faiss.py deleted file mode 100644 index f035ded..0000000 --- a/modelcache_mm/manager/vector_data/faiss.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -import os -from typing import List -import numpy as np -from modelcache.manager.vector_data.base import VectorBase, VectorData -from modelcache.utils import import_faiss -import_faiss() -import faiss # pylint: disable=C0413 - - -class Faiss(VectorBase): - def __init__(self, index_file_path, dimension, top_k): - self._index_file_path = index_file_path - self._dimension = dimension - self._index = faiss.index_factory(self._dimension, "IDMap,Flat", faiss.METRIC_L2) - self._top_k = top_k - if os.path.isfile(index_file_path): - self._index = faiss.read_index(index_file_path) - - def mul_add(self, datas: List[VectorData], model=None): - data_array, id_array = map(list, zip(*((data.data, data.id) for data in datas))) - np_data = np.array(data_array).astype("float32") - ids = np.array(id_array) - self._index.add_with_ids(np_data, ids) - - def search(self, data: np.ndarray, top_k: int = -1, model=None): - if self._index.ntotal == 0: - return None - if top_k == -1: - top_k = self._top_k - np_data = np.array(data).astype("float32").reshape(1, -1) - dist, ids = self._index.search(np_data, top_k) - ids = [int(i) for i in ids[0]] - return list(zip(dist[0], ids)) - - def rebuild_col(self, ids=None): - return True - - def rebuild(self, ids=None): - return True - - def delete(self, ids): - ids_to_remove = np.array(ids) - self._index.remove_ids(faiss.IDSelectorBatch(ids_to_remove.size, faiss.swig_ptr(ids_to_remove))) - - def flush(self): - faiss.write_index(self._index, self._index_file_path) - - def close(self): - self.flush() - - def count(self): - return self._index.ntotal diff --git a/modelcache_mm/manager/vector_data/milvus.py b/modelcache_mm/manager/vector_data/milvus.py deleted file mode 100644 index 50d6ab1..0000000 --- a/modelcache_mm/manager/vector_data/milvus.py +++ /dev/null @@ -1,189 +0,0 @@ -# -*- coding: utf-8 -*- -import logging -from typing 
import List -from uuid import uuid4 -import numpy as np -from modelcache.utils import import_pymilvus -from modelcache.utils.log import modelcache_log -from modelcache.manager.vector_data.base import VectorBase, VectorData - - -import_pymilvus() - -from pymilvus import ( # pylint: disable=C0413 - connections, - utility, - FieldSchema, - DataType, - CollectionSchema, - Collection, - MilvusException, -) - - -class Milvus(VectorBase): - SEARCH_PARAM = { - "IVF_FLAT": {"metric_type": "L2", "params": {"nprobe": 10}}, - "IVF_SQ8": {"metric_type": "L2", "params": {"nprobe": 10}}, - "IVF_PQ": {"metric_type": "L2", "params": {"nprobe": 10}}, - "HNSW": {"metric_type": "L2", "params": {"ef": 10}}, - "RHNSW_FLAT": {"metric_type": "L2", "params": {"ef": 10}}, - "RHNSW_SQ": {"metric_type": "L2", "params": {"ef": 10}}, - "RHNSW_PQ": {"metric_type": "L2", "params": {"ef": 10}}, - "IVF_HNSW": {"metric_type": "L2", "params": {"nprobe": 10, "ef": 10}}, - "ANNOY": {"metric_type": "L2", "params": {"search_k": 10}}, - "AUTOINDEX": {"metric_type": "L2", "params": {}}, - } - - def __init__( - self, - host: str = "localhost", - port: str = "19530", - user: str = "", - password: str = "", - secure: bool = False, - collection_name: str = "modelcache", - dimension: int = 0, - top_k: int = 1, - index_params: dict = None, - search_params: dict = None, - local_mode: bool = False, - local_data: str = "./milvus_data" - ): - if dimension <= 0: - raise ValueError( - f"invalid `dim` param: {dimension} in the Milvus vector store." - ) - self._local_mode = local_mode - self._local_data = local_data - self.dimension = dimension - self.top_k = top_k - self.index_params = index_params - if self._local_mode: - self._create_local(port, local_data) - self._connect(host, port, user, password, secure) - self.collection_name = collection_name - self.search_params = ( - search_params or self.SEARCH_PARAM[self.index_params["index_type"]] - ) - - def _connect(self, host, port, user, password, secure): - try: - i = [ - connections.get_connection_addr(x[0]) - for x in connections.list_connections() - ].index({"host": host, "port": port}) - self.alias = connections.list_connections()[i][0] - except ValueError: - # Connect to the Milvus instance using the passed in Environment variables - self.alias = uuid4().hex - connections.connect( - alias=self.alias, - host=host, - port=port, - user=user, # type: ignore - password=password, # type: ignore - secure=secure, - timeout=10 - ) - - def _create_collection(self, collection_name): - if not utility.has_collection(collection_name, using=self.alias): - schema = [ - FieldSchema( - name="id", - dtype=DataType.INT64, - is_primary=True, - auto_id=False, - ), - FieldSchema( - name="embedding", dtype=DataType.FLOAT_VECTOR, dim=self.dimension - ), - ] - schema = CollectionSchema(schema) - self.col = Collection( - collection_name, - schema=schema, - consistency_level="Session", - using=self.alias, - ) - else: - modelcache_log.warning("The %s collection already exists, and it will be used directly.", collection_name) - self.col = Collection( - collection_name, consistency_level="Session", using=self.alias - ) - - if len(self.col.indexes) == 0: - try: - modelcache_log.info("Attempting creation of Milvus index.") - self.col.create_index("embedding", index_params=self.index_params) - modelcache_log.info("Creation of Milvus index successful.") - except MilvusException as e: - modelcache_log.warning("Error with building index: %s, and attempting creation of default index.", e) - i_p = {"metric_type": "L2", 
"index_type": "AUTOINDEX", "params": {}} - self.col.create_index("embedding", index_params=i_p) - self.index_params = i_p - else: - self.index_params = self.col.indexes[0].to_dict()["index_param"] - - self.col.load() - - def _get_collection(self, collection_name): - self.col = Collection( - collection_name, consistency_level="Session", using=self.alias - ) - self.col.load() - - def mul_add(self, datas: List[VectorData], model=None): - collection_name_model = self.collection_name + '_' + model - self._create_collection(collection_name_model) - - data_array, id_array = map(list, zip(*((data.data, data.id) for data in datas))) - np_data = np.array(data_array).astype("float32") - entities = [id_array, np_data] - self.col.insert(entities) - - def search(self, data: np.ndarray, top_k: int = -1, model=None): - if top_k == -1: - top_k = self.top_k - collection_name_model = self.collection_name + '_' + model - self._create_collection(collection_name_model) - search_result = self.col.search( - data=data.reshape(1, -1).tolist(), - anns_field="embedding", - param=self.search_params, - limit=top_k, - ) - return list(zip(search_result[0].distances, search_result[0].ids)) - - def delete(self, ids, model=None): - collection_name_model = self.collection_name + '_' + model - self._get_collection(collection_name_model) - - del_ids = ",".join([str(x) for x in ids]) - resp = self.col.delete(f"id in [{del_ids}]") - delete_count = resp.delete_count - return delete_count - - def rebuild_col(self, model): - collection_name_model = self.collection_name + '_' + model - - # if col exist, drop col - if not utility.has_collection(collection_name_model, using=self.alias): - return 'model collection not found, please check!' - utility.drop_collection(collection_name_model, using=self.alias) - try: - self._create_collection(collection_name_model) - except Exception as e: - logging.info('create_collection: {}'.format(e)) - - def rebuild(self, ids=None): # pylint: disable=unused-argument - self.col.compact() - - def flush(self): - self.col.flush(_async=True) - - def close(self): - self.flush() - if self._local_mode: - self._server.stop() diff --git a/modelcache_mm/manager/vector_data/redis.py b/modelcache_mm/manager/vector_data/redis.py index 8295005..9c0cb2e 100644 --- a/modelcache_mm/manager/vector_data/redis.py +++ b/modelcache_mm/manager/vector_data/redis.py @@ -128,7 +128,6 @@ def create(self, model=None, mm_type=None): raise ValueError(str(e)) return 'success' - def rebuild(self, ids=None) -> bool: pass diff --git a/multicache_serving.py b/multicache_serving.py index 3564a3d..96f9561 100644 --- a/multicache_serving.py +++ b/multicache_serving.py @@ -274,10 +274,10 @@ def __health_check__(self): # r1 = json.dumps({'request_type': request_type, 'scope': scope, 'query': query, 'UUID': UUID}) # ============03 - request_type = 'remove' - scope = {"model": "test_0313"} - remove_type = 'truncate_by_model' - r1 = json.dumps({'request_type': request_type, 'scope': scope, 'remove_type': remove_type}) + # request_type = 'remove' + # scope = {"model": "test_0313"} + # remove_type = 'truncate_by_model' + # r1 = json.dumps({'request_type': request_type, 'scope': scope, 'remove_type': remove_type}) # ============04 # request_type = 'register' From 1af0e9411a0f5b68b022e78a65e895e4dc56648a Mon Sep 17 00:00:00 2001 From: fuhui Date: Thu, 18 Apr 2024 14:04:10 +0800 Subject: [PATCH 31/98] modify git ingore --- .gitignore | 2 +- multicache_serving.py | 18 +++++++++--------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git 
a/.gitignore b/.gitignore index 94daa29..c6aa115 100644 --- a/.gitignore +++ b/.gitignore @@ -139,4 +139,4 @@ dmypy.json *.ini -multicache_serving.py \ No newline at end of file +**/multicache_serving.py \ No newline at end of file diff --git a/multicache_serving.py b/multicache_serving.py index 96f9561..ff1c730 100644 --- a/multicache_serving.py +++ b/multicache_serving.py @@ -263,15 +263,15 @@ def __health_check__(self): # r1 = json.dumps(data_dict) # ============02 - # request_type = 'query' - # UUID = str(uuid.uuid1()) + "==>" + str(time.time()) - # scope = {"model": "test_0313"} - # img_data = 'http://resarch.oss-cn-hangzhou-zmf.aliyuncs.com/transFile%2Ftmp%2FLMM_test_image_coco%2FCOCO_train2014_000000332345.jpg' - # query = {'text': ['父母带着孩子来这个地方可能会有什么顾虑'], - # 'imageRaw': '', - # 'imageUrl': img_data, - # 'multiType': 'IMG_TEXT'} - # r1 = json.dumps({'request_type': request_type, 'scope': scope, 'query': query, 'UUID': UUID}) + request_type = 'query' + UUID = str(uuid.uuid1()) + "==>" + str(time.time()) + scope = {"model": "test_0313"} + img_data = 'http://resarch.oss-cn-hangzhou-zmf.aliyuncs.com/transFile%2Ftmp%2FLMM_test_image_coco%2FCOCO_train2014_000000332345.jpg' + query = {'text': ['父母带着孩子来这个地方可能会有什么顾虑'], + 'imageRaw': '', + 'imageUrl': img_data, + 'multiType': 'IMG_TEXT'} + r1 = json.dumps({'request_type': request_type, 'scope': scope, 'query': query, 'UUID': UUID}) # ============03 # request_type = 'remove' From f77ede02c07d2a03cafef85403e31fe5a2bbd271 Mon Sep 17 00:00:00 2001 From: fuhui Date: Thu, 18 Apr 2024 14:10:52 +0800 Subject: [PATCH 32/98] update .gitignore --- multicache_serving.py | 290 ------------------------------------------ 1 file changed, 290 deletions(-) delete mode 100644 multicache_serving.py diff --git a/multicache_serving.py b/multicache_serving.py deleted file mode 100644 index ff1c730..0000000 --- a/multicache_serving.py +++ /dev/null @@ -1,290 +0,0 @@ -# -*- coding: utf-8 -*- -""" - Alipay.com Inc. - Copyright (c) 2004-2021 All Rights Reserved. 
- ------------------------------------------------------ - File Name : gptcache_serving.py - Author : fuhui.phe - Email: hongen.phe@antfin.com - Create Time : 2023/5/28 11:03 - Description : description what the main function of this file - Change Activity: - version0 : 2023/5/28 11:03 by fuhui.phe init -""" -from datetime import datetime -from typing import Dict -import time -import json -import uuid -import configparser -from concurrent.futures import ThreadPoolExecutor -from modelcache_mm import cache -from modelcache_mm.adapter import adapter -from modelcache_mm.manager import CacheBase, VectorBase, get_data_manager -from modelcache_mm.similarity_evaluation.distance import SearchDistanceEvaluation -from modelcache_mm.processor.pre import mm_insert_dict -from modelcache_mm.processor.pre import mm_query_dict -from modelcache_mm.maya_embedding_service.maya_multi_embedding_service import get_embedding_multi -from modelcache_mm.maya_embedding_service.maya_multi_embedding_service import get_embedding_multi_concurrent_sin - - -def save_query_info(result, model, query, delta_time_log): - print('执行 save_query_info!') - cache.data_manager.save_query_resp(result, model=model, query=query, - delta_time=delta_time_log) - - -def response_text(cache_resp): - # print('cache_resp: {}'.format(cache_resp)) - return cache_resp['data'] - - -def response_hitquery(cache_resp): - # print('cache_resp: {}'.format(cache_resp)) - return cache_resp['hitQuery'] - - -# timm2vec = Timm() -# text2vec = Data2VecAudio() - - -# python类示例 -class UserBackend: - def __init__(self): - image_dimension = 768 - text_dimension = 768 - - mysql_config = configparser.ConfigParser() - mysql_config.read('modelcache_mm/config/mysql_config.ini') - - # milvus_config = configparser.ConfigParser() - # milvus_config.read('modelcache/config/milvus_config.ini') - - redis_config = configparser.ConfigParser() - redis_config.read('modelcache_mm/config/redis_config.ini') - - data_manager = get_data_manager(CacheBase("mysql", config=mysql_config), - VectorBase("redis", mm_dimension=image_dimension+text_dimension, - i_dimension=image_dimension, t_dimension=text_dimension, - redis_config=redis_config)) - cache.init( - embedding_func=get_embedding_multi, - embedding_concurrent_func=get_embedding_multi_concurrent_sin, - data_manager=data_manager, - similarity_evaluation=SearchDistanceEvaluation(), - insert_pre_embedding_func=mm_insert_dict, - query_pre_embedding_func=mm_query_dict, - ) - self.gptcache_version = datetime.now().strftime("%Y-%m-%d %H:%M") - self.executor = ThreadPoolExecutor(max_workers=6) - - def __call__(self, param): - print('version: {}'.format(self.gptcache_version)) - print('call_time: {}'.format(datetime.now().strftime("%Y-%m-%d %H:%M:%S"))) - try: - param_dict = json.loads(param) - except Exception as e: - result = {"errorCode": 101, "errorDesc": str(e), "cacheHit": False, "delta_time": 0, "hit_query": '', - "answer": ''} - cache.data_manager.save_query_resp(result, model='', query='', delta_time=0) - return json.dumps(result) - - request_type = param_dict.get("request_type") - UUID = param_dict.get("UUID", None) - print('request_type: {}'.format(request_type)) - # param parsing - try: - scope = param_dict.get("scope") - print('scope: {}'.format(scope)) - if scope is not None: - model = scope.get('model') - model = model.replace('-', '_') - model = model.replace('.', '_') - print('model: {}'.format(model)) - - if request_type in ['query', 'insert']: - if request_type == 'query': - query = param_dict.get("query") - elif 
request_type == 'insert': - chat_info = param_dict.get("chat_info") - query = chat_info[-1]['query'] - - if request_type is None or request_type not in ['query', 'remove', 'insert', 'register']: - result = {"errorCode": 102, - "errorDesc": "type exception, should one of ['query', 'insert', 'remove', 'register']", - "cacheHit": False, "delta_time": 0, "hit_query": '', "answer": ''} - cache.data_manager.save_query_resp(result, model=model, query='', delta_time=0) - return json.dumps(result) - except Exception as e: - result = {"errorCode": 103, "errorDesc": str(e), "cacheHit": False, "delta_time": 0, "hit_query": '', - "answer": ''} - return json.dumps(result) - - if request_type == 'query': - if UUID: - try: - uuid_list = UUID.split('==>') - user_start = float(uuid_list[1]) - ray_http_cost = time.time()-user_start - print('ray_http_cost: {}'.format(ray_http_cost)) - except Exception as e: - print('uuid_e: {}'.format(e)) - try: - start_time = time.time() - response = adapter.ChatCompletion.create_query( - scope={"model": model}, - query=query, - ) - # print('response: {}'.format(response)) - delta_time = '{}s'.format(round(time.time() - start_time, 2)) - if response is None: - result = {"errorCode": 0, "errorDesc": '', "cacheHit": False, "delta_time": delta_time, - "hit_query": '', "answer": ''} - elif isinstance(response, dict): - answer = response_text(response) - hit_query = response_hitquery(response) - result = {"errorCode": 0, "errorDesc": '', "cacheHit": True, "delta_time": delta_time, - "hit_query": hit_query, "answer": answer} - else: - result = {"errorCode": 201, "errorDesc": response, "cacheHit": False, "delta_time": delta_time, - "hit_query": '', "answer": ''} - delta_time_log = round(time.time() - start_time, 3) - print('delta_time_log: {}'.format(delta_time_log)) - - # modify at 20230807 20:51 - future = self.executor.submit(save_query_info, result, model, query, delta_time_log) - query_time = round(time.time() - start_time, 2) - print('query_time: {}'.format(query_time)) - except Exception as e: - # result = {"errorCode": 202, "errorDesc": str(e), "cacheHit": False, "delta_time": 0, - # "hit_query": '', "answer": ''} - raise e - print('result: {}'.format(result)) - return json.dumps(result, ensure_ascii=False) - - if request_type == 'insert': - if UUID: - try: - uuid_list = UUID.split('==>') - user_start = float(uuid_list[1]) - ray_http_cost = time.time()-user_start - print('ray_http_cost: {}'.format(ray_http_cost)) - except Exception as e: - print('uuid_e: {}'.format(e)) - try: - start_time = time.time() - try: - response = adapter.ChatCompletion.create_insert( - model=model, - chat_info=chat_info, - ) - except Exception as e: - # result = {"errorCode": 303, "errorDesc": str(e), "writeStatus": "exception"} - # return json.dumps(result, ensure_ascii=False) - raise e - - if response == 'success': - result = {"errorCode": 0, "errorDesc": "", "writeStatus": "success"} - else: - result = {"errorCode": 301, "errorDesc": response, "writeStatus": "exception"} - insert_time = round(time.time() - start_time, 2) - print('insert_time: {}'.format(insert_time)) - return json.dumps(result, ensure_ascii=False) - except Exception as e: - # result = {"errorCode": 304, "errorDesc": str(e), "writeStatus": "exception"} - # print('result: {}'.format(result)) - # return json.dumps(result, ensure_ascii=False) - raise e - - if request_type == 'remove': - remove_type = param_dict.get("remove_type") - id_list = param_dict.get("id_list", []) - print('remove_type: {}'.format(remove_type)) - - response = 
adapter.ChatCompletion.create_remove( - model=model, - remove_type=remove_type, - id_list=id_list - ) - - if not isinstance(response, dict): - result = {"errorCode": 401, "errorDesc": "", "response": response, "removeStatus": "exception"} - return json.dumps(result) - - state = response.get('status') - # if response == 'success': - if state == 'success': - result = {"errorCode": 0, "errorDesc": "", "response": response, "writeStatus": "success"} - else: - result = {"errorCode": 402, "errorDesc": "", "response": response, "writeStatus": "exception"} - return json.dumps(result) - - if request_type == 'register': - type = param_dict.get("type") - response = adapter.ChatCompletion.create_register( - model=model, - type=type - ) - if response in ['create_success', 'already_exists']: - result = {"errorCode": 0, "errorDesc": "", "response": response, "writeStatus": "success"} - else: - result = {"errorCode": 502, "errorDesc": "", "response": response, "writeStatus": "exception"} - return json.dumps(result) - - def __update_config__(self, config: Dict[str, object]): - """ - 可选 - """ - pass - - def __health_check__(self): - """ - 可选 - """ - # logging.info(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))) - return True - - -if __name__ == '__main__': - # ============01 - # request_type = 'insert' - # scope = {"model": "test_0313"} - # # UUID = "820b0052-d9d8-11ee-95f1-52775e3e6fd1" + "==>" + str(time.time()) - # UUID = str(uuid.uuid1()) + "==>" + str(time.time()) - # print('UUID: {}'.format(UUID)) - # img_data = "http://resarch.oss-cn-hangzhou-zmf.aliyuncs.com/transFile%2Ftmp%2FLMM_test_image_coco%2FCOCO_train2014_000000332345.jpg" - # query = {'text': ['父母带着孩子来这个地方可能会有什么顾虑'], - # 'imageRaw': '', - # 'imageUrl': img_data, - # 'imageId': 'ccc'} - # answer = "应该注意小孩不要跑到铁轨上" - # chat_info = [{"query": query, "answer": answer}] - # data_dict = {'request_type': request_type, 'scope': scope, 'chat_info': chat_info, 'UUID': UUID} - # r1 = json.dumps(data_dict) - - # ============02 - request_type = 'query' - UUID = str(uuid.uuid1()) + "==>" + str(time.time()) - scope = {"model": "test_0313"} - img_data = 'http://resarch.oss-cn-hangzhou-zmf.aliyuncs.com/transFile%2Ftmp%2FLMM_test_image_coco%2FCOCO_train2014_000000332345.jpg' - query = {'text': ['父母带着孩子来这个地方可能会有什么顾虑'], - 'imageRaw': '', - 'imageUrl': img_data, - 'multiType': 'IMG_TEXT'} - r1 = json.dumps({'request_type': request_type, 'scope': scope, 'query': query, 'UUID': UUID}) - - # ============03 - # request_type = 'remove' - # scope = {"model": "test_0313"} - # remove_type = 'truncate_by_model' - # r1 = json.dumps({'request_type': request_type, 'scope': scope, 'remove_type': remove_type}) - - # ============04 - # request_type = 'register' - # scope = {"model": "test_0313"} - # type = 'IMG_TEXT' - # r1 = json.dumps({'request_type': request_type, 'scope': scope, 'type': type}) - - user_backend = UserBackend() - resp = user_backend(r1) - print('resp: {}'.format(resp)) From 1d34f352627578cfef48e6d7006f11a9759a37b8 Mon Sep 17 00:00:00 2001 From: fuhui Date: Wed, 1 May 2024 15:34:58 +0800 Subject: [PATCH 33/98] update vector data --- modelcache/manager/data_manager.py | 1 + modelcache/manager/scalar_data/sql_storage.py | 1 - modelcache/manager/vector_data/milvus.py | 2 ++ modelcache/manager/vector_data/redis.py | 3 ++- 4 files changed, 5 insertions(+), 2 deletions(-) diff --git a/modelcache/manager/data_manager.py b/modelcache/manager/data_manager.py index d6637ea..861fabe 100644 --- a/modelcache/manager/data_manager.py +++ 
b/modelcache/manager/data_manager.py @@ -212,6 +212,7 @@ def import_data( cache_datas.append([ans, question, embedding_data, model]) ids = self.s.batch_insert(cache_datas) + print('ids: {}'.format(ids)) logging.info('ids: {}'.format(ids)) self.v.mul_add( [ diff --git a/modelcache/manager/scalar_data/sql_storage.py b/modelcache/manager/scalar_data/sql_storage.py index 23361d5..503217a 100644 --- a/modelcache/manager/scalar_data/sql_storage.py +++ b/modelcache/manager/scalar_data/sql_storage.py @@ -61,7 +61,6 @@ def _insert(self, data: List): def batch_insert(self, all_data: List[CacheData]): ids = [] for data in all_data: - print('data: {}'.format(data)) ids.append(self._insert(data)) return ids diff --git a/modelcache/manager/vector_data/milvus.py b/modelcache/manager/vector_data/milvus.py index 50d6ab1..de4bdb5 100644 --- a/modelcache/manager/vector_data/milvus.py +++ b/modelcache/manager/vector_data/milvus.py @@ -136,6 +136,7 @@ def _get_collection(self, collection_name): def mul_add(self, datas: List[VectorData], model=None): collection_name_model = self.collection_name + '_' + model + print('collection_name_model: {}'.format(collection_name_model)) self._create_collection(collection_name_model) data_array, id_array = map(list, zip(*((data.data, data.id) for data in datas))) @@ -147,6 +148,7 @@ def search(self, data: np.ndarray, top_k: int = -1, model=None): if top_k == -1: top_k = self.top_k collection_name_model = self.collection_name + '_' + model + print('collection_name_model: {}'.format(collection_name_model)) self._create_collection(collection_name_model) search_result = self.col.search( data=data.reshape(1, -1).tolist(), diff --git a/modelcache/manager/vector_data/redis.py b/modelcache/manager/vector_data/redis.py index b93893d..6e272f3 100644 --- a/modelcache/manager/vector_data/redis.py +++ b/modelcache/manager/vector_data/redis.py @@ -90,9 +90,9 @@ def mul_add(self, datas: List[VectorData], model=None): def search(self, data: np.ndarray, top_k: int = -1, model=None): index_name = get_index_name(model) + print('index_name: {}'.format(index_name)) id_field_name = "data_id" embedding_field_name = "data_vector" - base_query = f'*=>[KNN 2 @{embedding_field_name} $vector AS distance]' query = ( Query(base_query) @@ -107,6 +107,7 @@ def search(self, data: np.ndarray, top_k: int = -1, model=None): .search(query, query_params=query_params) .docs ) + print('results: {}'.format(results)) return [(float(result.distance), int(getattr(result, id_field_name))) for result in results] def rebuild(self, ids=None) -> bool: From a08a73971f8d03a3a6a97de290efc52e1960f320 Mon Sep 17 00:00:00 2001 From: fuhui Date: Wed, 15 May 2024 10:23:22 +0800 Subject: [PATCH 34/98] add clip model for encoder --- .gitignore | 5 +- model/clip_zh/__init__.py | 12 +++ modelcache/adapter/adapter_query.py | 7 +- modelcache/core.py | 2 +- modelcache/embedding/clip.py | 90 +++++++++++++++++++ modelcache/embedding/clip_demo.py | 49 ++++++++++ .../embedding/{string.py => string_text.py} | 0 .../embedding/{timm.py => timm_embedding.py} | 0 requirements.txt | 4 +- 9 files changed, 163 insertions(+), 6 deletions(-) create mode 100644 model/clip_zh/__init__.py create mode 100644 modelcache/embedding/clip.py create mode 100644 modelcache/embedding/clip_demo.py rename modelcache/embedding/{string.py => string_text.py} (100%) rename modelcache/embedding/{timm.py => timm_embedding.py} (100%) diff --git a/.gitignore b/.gitignore index c6aa115..cd9b201 100644 --- a/.gitignore +++ b/.gitignore @@ -139,4 +139,7 @@ dmypy.json *.ini 
-**/multicache_serving.py \ No newline at end of file +**/multicache_serving.py +**/modelcache_serving.py + +**/model/ \ No newline at end of file diff --git a/model/clip_zh/__init__.py b/model/clip_zh/__init__.py new file mode 100644 index 0000000..b5dea3f --- /dev/null +++ b/model/clip_zh/__init__.py @@ -0,0 +1,12 @@ +# -*- coding: utf-8 -*- +""" + Alipay.com Inc. + Copyright (c) 2004-2023 All Rights Reserved. + ------------------------------------------------------ + File Name : __init__.py.py + Author : fuhui.phe + Create Time : 2024/5/7 14:05 + Description : description what the main function of this file + Change Activity: + version0 : 2024/5/7 14:05 by fuhui.phe init +""" diff --git a/modelcache/adapter/adapter_query.py b/modelcache/adapter/adapter_query.py index 934c644..c32b7fe 100644 --- a/modelcache/adapter/adapter_query.py +++ b/modelcache/adapter/adapter_query.py @@ -30,10 +30,12 @@ def adapt_query(cache_data_convert, *args, **kwargs): report_func=chat_cache.report.embedding, )(pre_embedding_data) + # print('embedding_data: {}'.format(embedding_data)) + if cache_enable: cache_data_list = time_cal( chat_cache.data_manager.search, - func_name="milvus_search", + func_name="vector_search", report_func=chat_cache.report.search, )( embedding_data, @@ -41,6 +43,7 @@ def adapt_query(cache_data_convert, *args, **kwargs): top_k=kwargs.pop("top_k", -1), model=model ) + print('cache_data_list: {}'.format(cache_data_list)) cache_answers = [] cache_questions = [] cache_ids = [] @@ -78,8 +81,8 @@ def adapt_query(cache_data_convert, *args, **kwargs): return for cache_data in cache_data_list: + print('cache_data: {}'.format(cache_data)) primary_id = cache_data[1] - start_time = time.time() ret = chat_cache.data_manager.get_scalar_data( cache_data, extra_param=context.get("get_scalar_data", None) ) diff --git a/modelcache/core.py b/modelcache/core.py index f5484e3..bd57029 100644 --- a/modelcache/core.py +++ b/modelcache/core.py @@ -4,7 +4,7 @@ from modelcache.processor.post import first from modelcache.similarity_evaluation import ExactMatchEvaluation from modelcache.similarity_evaluation import SimilarityEvaluation -from modelcache.embedding.string import to_embeddings as string_embedding +from modelcache.embedding.string_text import to_embeddings as string_embedding from modelcache.report import Report from modelcache.config import Config from modelcache.utils.cache_func import cache_all diff --git a/modelcache/embedding/clip.py b/modelcache/embedding/clip.py new file mode 100644 index 0000000..cbe45ff --- /dev/null +++ b/modelcache/embedding/clip.py @@ -0,0 +1,90 @@ +# -*- coding: utf-8 -*- +import os +import torch +from modelcache.embedding.base import BaseEmbedding +from modelscope.utils.constant import Tasks +from modelscope.pipelines import pipeline +from modelscope.preprocessors.image import load_image + + +# def mean_pooling(model_output, attention_mask): +# token_embeddings = model_output[0] # First element of model_output contains all token embeddings +# input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() +# return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) + + +class ClipAudio(BaseEmbedding): + def __init__(self, model: str = "sentence-transformers/all-MiniLM-L6-v2"): + # current_dir = os.path.dirname(os.path.abspath(__file__)) + # parent_dir = os.path.dirname(current_dir) + # model_dir = os.path.dirname(parent_dir) + # model = os.path.join(model_dir, 'model/text2vec-base-chinese/') 
+ + self.clip_pipeline = pipeline(task=Tasks.multi_modal_embedding, + model='damo/multi-modal_clip-vit-base-patch16_zh', model_revision='v1.0.1') + + self.__dimension = 1024 + + def to_embeddings(self, data_dict, **_): + text_list = data_dict['text'] + image_data = data_dict['image'] + + img_data = None + txt_data = None + + if image_data: + input_img = load_image(image_data) + # 2D Tensor, [图片数, 特征维度] + img_embedding = self.clip_pipeline.forward({'img': input_img})['img_embedding'].tolist()[0] if input_img else [] + print('img_embedding: {}'.format(img_embedding)) + else: + raise ValueError('image_data is None, please check!') + + if text_list and len(text_list) > 0: + # 2D Tensor, [文本数, 特征维度] + text_embedding = self.clip_pipeline.forward({'text': text_list})['text_embedding'].tolist()[0] if text_list else [] + print('text_embedding: {}'.format(text_embedding)) + else: + raise ValueError('text_list is None, please check!') + + return {'image_embedding': img_embedding, 'text_embeddings': text_embedding} + + # return {'image_embedding': img_feats, 'text_embeddings': txt_feats} + # input_texts = ["杰尼龟", "妙蛙种子", "小火龙", "皮卡丘"] + # input_img = load_image( + # 'https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg') + + # img_embedding = self.clip_pipeline.forward({'img': input_img})['img_embedding'] # 2D Tensor, [图片数, 特征维度] + # print('img_embedding: {}'.format(img_embedding)) + # text_embedding = self.clip_pipeline.forward({'text': input_texts})['text_embedding'] # 2D Tensor, [文本数, 特征维度] + + + # return embedding_array + + def post_proc(self, token_embeddings, inputs): + attention_mask = inputs["attention_mask"] + input_mask_expanded = ( + attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() + ) + sentence_embs = torch.sum( + token_embeddings * input_mask_expanded, 1 + ) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) + return sentence_embs + + @property + def dimension(self): + """Embedding dimension. + + :return: embedding dimension + """ + return self.__dimension + + +# if __name__ == '__main__': +# clip_vec = ClipAudio() +# text_list = ['hello', '你好'] +# text = ['###'.join(text_list)] +# image = 'https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg' +# data_dict = {'text': text, 'image': image} +# resp = clip_vec.to_embeddings(data_dict) +# print('resp: {}'.format(resp)) diff --git a/modelcache/embedding/clip_demo.py b/modelcache/embedding/clip_demo.py new file mode 100644 index 0000000..f6e157b --- /dev/null +++ b/modelcache/embedding/clip_demo.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +""" + Alipay.com Inc. + Copyright (c) 2004-2023 All Rights Reserved. 
+ ------------------------------------------------------ + File Name : clip_demo.py + Author : fuhui.phe + Create Time : 2024/5/7 11:58 + Description : description what the main function of this file + Change Activity: + version0 : 2024/5/7 11:58 by fuhui.phe init +""" +import torch +from modelscope.utils.constant import Tasks +from modelscope.pipelines import pipeline +from modelscope.preprocessors.image import load_image + + +pipeline = pipeline(task=Tasks.multi_modal_embedding, + model='damo/multi-modal_clip-vit-base-patch16_zh', model_revision='v1.0.1') + +# pipeline = pipeline(task=Tasks.multi_modal_embedding, +# model='/Users/penghongen/PycharmProjects/CodeFuse-ModelCache/model/clip_zh', model_revision='v1.0.1') + +# pipeline = pipeline(task=Tasks.multi_modal_embedding, model='/Users/penghongen/PycharmProjects/CodeFuse-ModelCache/model/clip_zh') + + +input_img = load_image('https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg') # 支持皮卡丘示例图片路径/本地图片 返回PIL.Image + + +input_texts = ["杰尼龟", "妙蛙种子", "小火龙", "皮卡丘"] + +# 支持一张图片(PIL.Image)或多张图片(List[PIL.Image])输入,输出归一化特征向量 +img_embedding = pipeline.forward({'img': input_img})['img_embedding'] # 2D Tensor, [图片数, 特征维度] +print('img_embedding: {}'.format(img_embedding)) + +# 支持一条文本(str)或多条文本(List[str])输入,输出归一化特征向量 +text_embedding = pipeline.forward({'text': input_texts})['text_embedding'] # 2D Tensor, [文本数, 特征维度] + +# 计算图文相似度 +with torch.no_grad(): + # 计算内积得到logit,考虑模型temperature + logits_per_image = (img_embedding / pipeline.model.temperature) @ text_embedding.t() + # 根据logit计算概率分布 + probs = logits_per_image.softmax(dim=-1).cpu().numpy() + +print("图文匹配概率:", probs) + + diff --git a/modelcache/embedding/string.py b/modelcache/embedding/string_text.py similarity index 100% rename from modelcache/embedding/string.py rename to modelcache/embedding/string_text.py diff --git a/modelcache/embedding/timm.py b/modelcache/embedding/timm_embedding.py similarity index 100% rename from modelcache/embedding/timm.py rename to modelcache/embedding/timm_embedding.py diff --git a/requirements.txt b/requirements.txt index 3bf85e6..78d682e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,7 +8,7 @@ pymilvus==2.3.1 PyMySQL==1.1.0 Requests==2.31.0 torch==2.1.0 -transformers==4.34.1 +transformers==4.38.2 faiss-cpu==1.7.4 redis==5.0.1 - +modelscope==1.14.0 From e6cfcb8352dadc972e492f9a50a0bbc8ee69cc2f Mon Sep 17 00:00:00 2001 From: fuhui Date: Wed, 15 May 2024 11:13:28 +0800 Subject: [PATCH 35/98] debug multimodal cache --- modelcache/embedding/clip_demo.py | 49 ------------------- modelcache_mm/adapter/adapter_insert.py | 19 +++---- modelcache_mm/adapter/adapter_query.py | 28 ++++++----- modelcache_mm/core.py | 3 -- modelcache_mm/embedding/__init__.py | 31 ++---------- .../embedding/clip.py | 12 ++--- 6 files changed, 33 insertions(+), 109 deletions(-) delete mode 100644 modelcache/embedding/clip_demo.py rename {modelcache => modelcache_mm}/embedding/clip.py (88%) diff --git a/modelcache/embedding/clip_demo.py b/modelcache/embedding/clip_demo.py deleted file mode 100644 index f6e157b..0000000 --- a/modelcache/embedding/clip_demo.py +++ /dev/null @@ -1,49 +0,0 @@ -# -*- coding: utf-8 -*- -""" - Alipay.com Inc. - Copyright (c) 2004-2023 All Rights Reserved. 
- ------------------------------------------------------ - File Name : clip_demo.py - Author : fuhui.phe - Create Time : 2024/5/7 11:58 - Description : description what the main function of this file - Change Activity: - version0 : 2024/5/7 11:58 by fuhui.phe init -""" -import torch -from modelscope.utils.constant import Tasks -from modelscope.pipelines import pipeline -from modelscope.preprocessors.image import load_image - - -pipeline = pipeline(task=Tasks.multi_modal_embedding, - model='damo/multi-modal_clip-vit-base-patch16_zh', model_revision='v1.0.1') - -# pipeline = pipeline(task=Tasks.multi_modal_embedding, -# model='/Users/penghongen/PycharmProjects/CodeFuse-ModelCache/model/clip_zh', model_revision='v1.0.1') - -# pipeline = pipeline(task=Tasks.multi_modal_embedding, model='/Users/penghongen/PycharmProjects/CodeFuse-ModelCache/model/clip_zh') - - -input_img = load_image('https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg') # 支持皮卡丘示例图片路径/本地图片 返回PIL.Image - - -input_texts = ["杰尼龟", "妙蛙种子", "小火龙", "皮卡丘"] - -# 支持一张图片(PIL.Image)或多张图片(List[PIL.Image])输入,输出归一化特征向量 -img_embedding = pipeline.forward({'img': input_img})['img_embedding'] # 2D Tensor, [图片数, 特征维度] -print('img_embedding: {}'.format(img_embedding)) - -# 支持一条文本(str)或多条文本(List[str])输入,输出归一化特征向量 -text_embedding = pipeline.forward({'text': input_texts})['text_embedding'] # 2D Tensor, [文本数, 特征维度] - -# 计算图文相似度 -with torch.no_grad(): - # 计算内积得到logit,考虑模型temperature - logits_per_image = (img_embedding / pipeline.model.temperature) @ text_embedding.t() - # 根据logit计算概率分布 - probs = logits_per_image.softmax(dim=-1).cpu().numpy() - -print("图文匹配概率:", probs) - - diff --git a/modelcache_mm/adapter/adapter_insert.py b/modelcache_mm/adapter/adapter_insert.py index 2d94798..0659710 100644 --- a/modelcache_mm/adapter/adapter_insert.py +++ b/modelcache_mm/adapter/adapter_insert.py @@ -38,12 +38,13 @@ def adapt_insert(*args, **kwargs): raise ValueError("Both pre_embedding_image_url and pre_embedding_image_raw cannot be non-empty at the same time.") if pre_embedding_image_url: - url_start_time = time.time() - response = requests.get(pre_embedding_image_url) - image_data = response.content - pre_embedding_image = base64.b64encode(image_data).decode('utf-8') - get_image_time = '{}s'.format(round(time.time() - url_start_time, 2)) - print('get_image_time: {}'.format(get_image_time)) + # url_start_time = time.time() + # response = requests.get(pre_embedding_image_url) + # image_data = response.content + # pre_embedding_image = base64.b64encode(image_data).decode('utf-8') + # get_image_time = '{}s'.format(round(time.time() - url_start_time, 2)) + # print('get_image_time: {}'.format(get_image_time)) + pre_embedding_image = pre_embedding_image_url elif pre_embedding_image_raw: pre_embedding_image = pre_embedding_image_raw else: @@ -70,9 +71,9 @@ def adapt_insert(*args, **kwargs): print('text_embeddings: {}'.format(text_embeddings)) if len(image_embeddings) > 0 and len(image_embeddings) > 0: - image_embedding = np.array(image_embeddings[0]) - text_embedding = text_embeddings[0] - embedding_data = np.concatenate((image_embedding, text_embedding)) + # image_embedding = np.array(image_embeddings[0]) + # text_embedding = text_embeddings[0] + embedding_data = np.concatenate((image_embeddings, text_embeddings)) mm_type = 'mm' elif len(image_embeddings) > 0: image_embedding = np.array(image_embeddings[0]) diff --git a/modelcache_mm/adapter/adapter_query.py b/modelcache_mm/adapter/adapter_query.py index 88a52c5..7eeeab5 100644 --- 
a/modelcache_mm/adapter/adapter_query.py +++ b/modelcache_mm/adapter/adapter_query.py @@ -30,8 +30,6 @@ def adapt_query(cache_data_convert, *args, **kwargs): pre_embedding_image_raw = pre_embedding_data_dict['imageRaw'] pre_embedding_image_url = pre_embedding_data_dict['imageUrl'] pre_multi_type = pre_embedding_data_dict['multiType'] - # print('pre_embedding_image_url: {}'.format(pre_embedding_image_url)) - # print('pre_embedding_text: {}'.format(pre_embedding_text)) # 判断逻辑 if pre_multi_type == 'IMG_TEXT': @@ -39,12 +37,13 @@ def adapt_query(cache_data_convert, *args, **kwargs): raise ValueError( "Both pre_embedding_imageUrl and pre_embedding_imageRaw cannot be non-empty at the same time.") if pre_embedding_image_url: - url_start_time = time.time() - response = requests.get(pre_embedding_image_url) - image_data = response.content - pre_embedding_image = base64.b64encode(image_data).decode('utf-8') - get_image_time = '{}s'.format(round(time.time() - url_start_time, 2)) - print('get_image_time: {}'.format(get_image_time)) + # url_start_time = time.time() + # response = requests.get(pre_embedding_image_url) + # image_data = response.content + # pre_embedding_image = base64.b64encode(image_data).decode('utf-8') + # get_image_time = '{}s'.format(round(time.time() - url_start_time, 2)) + # print('get_image_time: {}'.format(get_image_time)) + pre_embedding_image = pre_embedding_image_url elif pre_embedding_image_raw: pre_embedding_image = pre_embedding_image_raw else: @@ -63,7 +62,7 @@ def adapt_query(cache_data_convert, *args, **kwargs): if cache_enable: if pre_multi_type == 'IMG_TEXT': embedding_data_resp = time_cal( - chat_cache.embedding_concurrent_func, + chat_cache.embedding_func, func_name="iat_embedding", report_func=chat_cache.report.embedding, )(data_dict) @@ -76,10 +75,15 @@ def adapt_query(cache_data_convert, *args, **kwargs): image_embeddings = embedding_data_resp['image_embedding'] text_embeddings = embedding_data_resp['text_embeddings'] + print('image_embeddings: {}'.format(image_embeddings)) + print('image_embeddings_len: {}'.format(len(image_embeddings))) + print('text_embeddings: {}'.format(text_embeddings)) + print('text_embeddings_len: {}'.format(len(text_embeddings))) + if len(image_embeddings) > 0 and len(image_embeddings) > 0: - image_embedding = np.array(image_embeddings[0]) - text_embedding = np.array(text_embeddings[0]) - embedding_data = np.concatenate((image_embedding, text_embedding)) + # image_embedding = np.array(image_embeddings[0]) + # text_embedding = np.array(text_embeddings[0]) + embedding_data = np.concatenate((image_embeddings, text_embeddings)) mm_type = 'mm' elif len(image_embeddings) > 0: image_embedding = np.array(image_embeddings[0]) diff --git a/modelcache_mm/core.py b/modelcache_mm/core.py index 2c6c037..d6ff6fb 100644 --- a/modelcache_mm/core.py +++ b/modelcache_mm/core.py @@ -20,7 +20,6 @@ def __init__(self): self.query_pre_embedding_func = None self.insert_pre_embedding_func = None self.embedding_func = None - self.embedding_concurrent_func = None self.data_manager: Optional[DataManager] = None self.similarity_evaluation: Optional[SimilarityEvaluation] = None self.post_process_messages_func = None @@ -34,7 +33,6 @@ def init( query_pre_embedding_func=None, insert_pre_embedding_func=None, embedding_func=string_embedding, - embedding_concurrent_func=string_embedding, data_manager: DataManager = get_data_manager(), similarity_evaluation=ExactMatchEvaluation(), post_process_messages_func=first, @@ -46,7 +44,6 @@ def init( self.query_pre_embedding_func = 
query_pre_embedding_func self.insert_pre_embedding_func = insert_pre_embedding_func self.embedding_func = embedding_func - self.embedding_concurrent_func = embedding_concurrent_func self.data_manager: DataManager = data_manager self.similarity_evaluation = similarity_evaluation self.post_process_messages_func = post_process_messages_func diff --git a/modelcache_mm/embedding/__init__.py b/modelcache_mm/embedding/__init__.py index 03b6762..1275963 100644 --- a/modelcache_mm/embedding/__init__.py +++ b/modelcache_mm/embedding/__init__.py @@ -1,32 +1,7 @@ # -*- coding: utf-8 -*- from modelcache.utils.lazy_import import LazyImport -huggingface = LazyImport("huggingface", globals(), "modelcache.embedding.huggingface") -data2vec = LazyImport("data2vec", globals(), "modelcache.embedding.data2vec") -llmEmb = LazyImport("llmEmb", globals(), "modelcache.embedding.llmEmb") -fasttext = LazyImport("fasttext", globals(), "modelcache.embedding.fasttext") -paddlenlp = LazyImport("paddlenlp", globals(), "modelcache.embedding.paddlenlp") -timm = LazyImport("timm", globals(), "modelcache.embedding.timm") +clip = LazyImport("clip", globals(), "modelcache_mm.embedding.clip") -def Huggingface(model="sentence-transformers/all-mpnet-base-v2"): - return huggingface.Huggingface(model) - - -def Data2VecAudio(model="facebook/data2vec-audio-base-960h"): - return data2vec.Data2VecAudio(model) - - -def LlmEmb2vecAudio(): - return llmEmb.LlmEmb2Vec() - - -def FastText(model="en", dim=None): - return fasttext.FastText(model, dim) - - -def PaddleNLP(model="ernie-3.0-medium-zh"): - return paddlenlp.PaddleNLP(model) - - -def Timm(model="resnet50", device="default"): - return timm.Timm(model, device) +def Clip2Vec(model="damo/multi-modal_clip-vit-base-patch16_zh"): + return clip.ClipAudio(model) diff --git a/modelcache/embedding/clip.py b/modelcache_mm/embedding/clip.py similarity index 88% rename from modelcache/embedding/clip.py rename to modelcache_mm/embedding/clip.py index cbe45ff..d718e43 100644 --- a/modelcache/embedding/clip.py +++ b/modelcache_mm/embedding/clip.py @@ -14,15 +14,11 @@ class ClipAudio(BaseEmbedding): - def __init__(self, model: str = "sentence-transformers/all-MiniLM-L6-v2"): - # current_dir = os.path.dirname(os.path.abspath(__file__)) - # parent_dir = os.path.dirname(current_dir) - # model_dir = os.path.dirname(parent_dir) - # model = os.path.join(model_dir, 'model/text2vec-base-chinese/') - + def __init__(self, model: str = 'damo/multi-modal_clip-vit-base-patch16_zh'): + self.model = model + self.device = 'cuda' if torch.cuda.is_available() else 'cpu' self.clip_pipeline = pipeline(task=Tasks.multi_modal_embedding, - model='damo/multi-modal_clip-vit-base-patch16_zh', model_revision='v1.0.1') - + model=model, model_revision='v1.0.1') self.__dimension = 1024 def to_embeddings(self, data_dict, **_): From 215b215181a48255eaab798e313473523b10c78f Mon Sep 17 00:00:00 2001 From: fuhui Date: Wed, 15 May 2024 13:47:52 +0800 Subject: [PATCH 36/98] removed print statements and standardized formatting --- examples/flask/data_insert.py | 1 - examples/flask/data_query.py | 1 - examples/flask/data_query_long.py | 1 - examples/flask/register.py | 1 - model/clip_zh/__init__.py | 11 --- modelcache/adapter/adapter_query.py | 4 - modelcache/adapter/adapter_register.py | 1 - modelcache/manager/data_manager.py | 2 - .../manager/scalar_data/sql_storage_sqlite.py | 64 --------------- modelcache/manager/vector_data/milvus.py | 2 - modelcache/manager/vector_data/redis.py | 3 - modelcache/utils/index_util.py | 25 ------ 
modelcache_mm/adapter/adapter.py | 1 + modelcache_mm/adapter/adapter_insert.py | 16 +--- modelcache_mm/adapter/adapter_query.py | 30 +------ modelcache_mm/adapter/adapter_register.py | 3 - modelcache_mm/adapter/adapter_remove.py | 1 - modelcache_mm/embedding/clip.py | 33 -------- modelcache_mm/embedding/timm.py | 11 ++- modelcache_mm/manager/data_manager.py | 81 +------------------ modelcache_mm/manager/eviction_manager.py | 9 --- .../manager/scalar_data/sql_storage.py | 16 ---- .../manager/scalar_data/sql_storage_sqlite.py | 65 --------------- modelcache_mm/manager/vector_data/manager.py | 1 - modelcache_mm/manager/vector_data/redis.py | 2 - modelcache_mm/processor/pre.py | 6 -- .../similarity_evaluation/exact_match.py | 1 - modelcache_mm/utils/index_util.py | 2 - modelcache_mm/utils/lazy_import.py | 1 - modelcache_mm/utils/model_filter.py | 15 ---- 30 files changed, 10 insertions(+), 400 deletions(-) delete mode 100644 modelcache_mm/utils/model_filter.py diff --git a/examples/flask/data_insert.py b/examples/flask/data_insert.py index 094082d..52d1405 100644 --- a/examples/flask/data_insert.py +++ b/examples/flask/data_insert.py @@ -13,7 +13,6 @@ def run(): headers = {"Content-Type": "application/json"} res = requests.post(url, headers=headers, json=json.dumps(data)) res_text = res.text - print('res_text: {}'.format(res_text)) if __name__ == '__main__': diff --git a/examples/flask/data_query.py b/examples/flask/data_query.py index 3eccae3..aa59ae3 100644 --- a/examples/flask/data_query.py +++ b/examples/flask/data_query.py @@ -13,7 +13,6 @@ def run(): headers = {"Content-Type": "application/json"} res = requests.post(url, headers=headers, json=json.dumps(data)) res_text = res.text - print('res_text: {}'.format(res_text)) if __name__ == '__main__': diff --git a/examples/flask/data_query_long.py b/examples/flask/data_query_long.py index 2ebfcc8..e8c29f8 100644 --- a/examples/flask/data_query_long.py +++ b/examples/flask/data_query_long.py @@ -18,7 +18,6 @@ def run(): headers = {"Content-Type": "application/json"} res = requests.post(url, headers=headers, json=json.dumps(data)) res_text = res.text - print('res_text: {}'.format(res_text)) if __name__ == '__main__': diff --git a/examples/flask/register.py b/examples/flask/register.py index 737b495..23f0849 100644 --- a/examples/flask/register.py +++ b/examples/flask/register.py @@ -14,7 +14,6 @@ def run(): headers = {"Content-Type": "application/json"} res = requests.post(url, headers=headers, json=json.dumps(data)) res_text = res.text - print('res_text: {}'.format(res_text)) if __name__ == '__main__': diff --git a/model/clip_zh/__init__.py b/model/clip_zh/__init__.py index b5dea3f..40a96af 100644 --- a/model/clip_zh/__init__.py +++ b/model/clip_zh/__init__.py @@ -1,12 +1 @@ # -*- coding: utf-8 -*- -""" - Alipay.com Inc. - Copyright (c) 2004-2023 All Rights Reserved. 
- ------------------------------------------------------ - File Name : __init__.py.py - Author : fuhui.phe - Create Time : 2024/5/7 14:05 - Description : description what the main function of this file - Change Activity: - version0 : 2024/5/7 14:05 by fuhui.phe init -""" diff --git a/modelcache/adapter/adapter_query.py b/modelcache/adapter/adapter_query.py index c32b7fe..8f76bff 100644 --- a/modelcache/adapter/adapter_query.py +++ b/modelcache/adapter/adapter_query.py @@ -30,8 +30,6 @@ def adapt_query(cache_data_convert, *args, **kwargs): report_func=chat_cache.report.embedding, )(pre_embedding_data) - # print('embedding_data: {}'.format(embedding_data)) - if cache_enable: cache_data_list = time_cal( chat_cache.data_manager.search, @@ -43,7 +41,6 @@ def adapt_query(cache_data_convert, *args, **kwargs): top_k=kwargs.pop("top_k", -1), model=model ) - print('cache_data_list: {}'.format(cache_data_list)) cache_answers = [] cache_questions = [] cache_ids = [] @@ -81,7 +78,6 @@ def adapt_query(cache_data_convert, *args, **kwargs): return for cache_data in cache_data_list: - print('cache_data: {}'.format(cache_data)) primary_id = cache_data[1] ret = chat_cache.data_manager.get_scalar_data( cache_data, extra_param=context.get("get_scalar_data", None) diff --git a/modelcache/adapter/adapter_register.py b/modelcache/adapter/adapter_register.py index 53df128..dafe597 100644 --- a/modelcache/adapter/adapter_register.py +++ b/modelcache/adapter/adapter_register.py @@ -9,5 +9,4 @@ def adapt_register(*args, **kwargs): return ValueError('') register_resp = chat_cache.data_manager.create_index(model) - print('register_resp: {}'.format(register_resp)) return register_resp diff --git a/modelcache/manager/data_manager.py b/modelcache/manager/data_manager.py index 861fabe..6734ef2 100644 --- a/modelcache/manager/data_manager.py +++ b/modelcache/manager/data_manager.py @@ -212,8 +212,6 @@ def import_data( cache_datas.append([ans, question, embedding_data, model]) ids = self.s.batch_insert(cache_datas) - print('ids: {}'.format(ids)) - logging.info('ids: {}'.format(ids)) self.v.mul_add( [ VectorData(id=ids[i], data=embedding_data) diff --git a/modelcache/manager/scalar_data/sql_storage_sqlite.py b/modelcache/manager/scalar_data/sql_storage_sqlite.py index 495fbf7..d4febdc 100644 --- a/modelcache/manager/scalar_data/sql_storage_sqlite.py +++ b/modelcache/manager/scalar_data/sql_storage_sqlite.py @@ -1,74 +1,10 @@ # -*- coding: utf-8 -*- -import os -import time - -import pymysql import json -import base64 from typing import List from modelcache.manager.scalar_data.base import CacheStorage, CacheData import sqlite3 -# def insert_single_data(conn, sql, data_tuple): -# cur = conn.cursor() -# try: -# cur.execute(sql, data_tuple) -# conn.commit() -# id = cur.lastrowid -# # print('id: {}'.format(id)) -# return id -# except Exception as e: -# print(e) -# conn.rollback() -# if cur: -# cur.close() -# -# -# def excute_sql(conn, sql): -# cur = conn.cursor() -# try: -# cur.execute(sql) -# conn.commit() -# except Exception as e: -# print(e) -# conn.rollback() -# if cur: -# cur.close() -# -# -# def excute_delete_sql(conn, sql): -# cur = conn.cursor() -# try: -# cur.execute(sql) -# row_count = cur.rowcount -# conn.commit() -# except Exception as e: -# print(e) -# conn.rollback() -# if cur: -# cur.close() -# return row_count -# -# -# def query_fetch_one_data(conn, sql): -# cursor = conn.cursor() -# try: -# cursor.execute(sql) -# except Exception as e: -# print(e) -# conn.rollback() -# rows = cursor.fetchone() -# if cursor: -# 
cursor.close() -# return rows -# -# -# def close(conn): -# if conn: -# conn.close() - - class SQLStorage(CacheStorage): def __init__( self, diff --git a/modelcache/manager/vector_data/milvus.py b/modelcache/manager/vector_data/milvus.py index de4bdb5..50d6ab1 100644 --- a/modelcache/manager/vector_data/milvus.py +++ b/modelcache/manager/vector_data/milvus.py @@ -136,7 +136,6 @@ def _get_collection(self, collection_name): def mul_add(self, datas: List[VectorData], model=None): collection_name_model = self.collection_name + '_' + model - print('collection_name_model: {}'.format(collection_name_model)) self._create_collection(collection_name_model) data_array, id_array = map(list, zip(*((data.data, data.id) for data in datas))) @@ -148,7 +147,6 @@ def search(self, data: np.ndarray, top_k: int = -1, model=None): if top_k == -1: top_k = self.top_k collection_name_model = self.collection_name + '_' + model - print('collection_name_model: {}'.format(collection_name_model)) self._create_collection(collection_name_model) search_result = self.col.search( data=data.reshape(1, -1).tolist(), diff --git a/modelcache/manager/vector_data/redis.py b/modelcache/manager/vector_data/redis.py index 6e272f3..afa1088 100644 --- a/modelcache/manager/vector_data/redis.py +++ b/modelcache/manager/vector_data/redis.py @@ -49,7 +49,6 @@ def _check_index_exists(self, index_name: str) -> bool: def create_index(self, index_name, index_prefix): dimension = self.dimension - print('dimension: {}'.format(dimension)) if self._check_index_exists(index_name): modelcache_log.info( "The %s already exists, and it will be used directly", index_name @@ -90,7 +89,6 @@ def mul_add(self, datas: List[VectorData], model=None): def search(self, data: np.ndarray, top_k: int = -1, model=None): index_name = get_index_name(model) - print('index_name: {}'.format(index_name)) id_field_name = "data_id" embedding_field_name = "data_vector" base_query = f'*=>[KNN 2 @{embedding_field_name} $vector AS distance]' @@ -107,7 +105,6 @@ def search(self, data: np.ndarray, top_k: int = -1, model=None): .search(query, query_params=query_params) .docs ) - print('results: {}'.format(results)) return [(float(result.distance), int(getattr(result, id_field_name))) for result in results] def rebuild(self, ids=None) -> bool: diff --git a/modelcache/utils/index_util.py b/modelcache/utils/index_util.py index 2a1cfca..be6e856 100644 --- a/modelcache/utils/index_util.py +++ b/modelcache/utils/index_util.py @@ -7,28 +7,3 @@ def get_index_name(model): def get_index_prefix(model): return 'prefix' + '_' + model - - -# def get_mm_index_name(model, mm_type): -# print('mm_type: {}'.format(mm_type)) -# if mm_type not in ['IMG_TEXT', 'mm', 'IMG', 'image', 'TEXT', 'text']: -# raise ValueError('mm_type is not normal!') -# if mm_type == 'IMG_TEXT': -# mm_type = 'mm' -# elif mm_type == 'IMG': -# mm_type = 'image' -# elif mm_type == 'TEXT': -# mm_type = 'text' -# return 'multicache' + '_' + model + '_' + mm_type -# -# -# def get_mm_index_prefix(model, mm_type): -# if mm_type not in ['IMG_TEXT', 'mm', 'IMG', 'image', 'TEXT', 'text']: -# raise ValueError('iat_type is not normal!') -# if mm_type == 'IMG_TEXT': -# mm_type = 'mm' -# elif mm_type == 'IMG': -# mm_type = 'image' -# elif mm_type == 'TEXT': -# mm_type = 'text' -# return 'prefix' + '_' + model + '_' + mm_type diff --git a/modelcache_mm/adapter/adapter.py b/modelcache_mm/adapter/adapter.py index 722a8db..44c87a3 100644 --- a/modelcache_mm/adapter/adapter.py +++ b/modelcache_mm/adapter/adapter.py @@ -22,6 +22,7 @@ def 
cache_data_convert(cache_data, cache_query): except Exception as e: # return str(e) raise e + @classmethod def create_insert(cls, *args, **kwargs): try: diff --git a/modelcache_mm/adapter/adapter_insert.py b/modelcache_mm/adapter/adapter_insert.py index 0659710..89933bc 100644 --- a/modelcache_mm/adapter/adapter_insert.py +++ b/modelcache_mm/adapter/adapter_insert.py @@ -1,7 +1,4 @@ # -*- coding: utf-8 -*- -import time -import requests -import base64 import numpy as np from modelcache_mm import cache from modelcache_mm.utils.error import NotInitError @@ -25,7 +22,6 @@ def adapt_insert(*args, **kwargs): prompts=chat_cache.config.prompts, ) - print('pre_embedding_data_dict: {}'.format(pre_embedding_data_dict)) chat_info = kwargs.pop("chat_info", []) llm_data = chat_info[-1]['answer'] @@ -38,12 +34,6 @@ def adapt_insert(*args, **kwargs): raise ValueError("Both pre_embedding_image_url and pre_embedding_image_raw cannot be non-empty at the same time.") if pre_embedding_image_url: - # url_start_time = time.time() - # response = requests.get(pre_embedding_image_url) - # image_data = response.content - # pre_embedding_image = base64.b64encode(image_data).decode('utf-8') - # get_image_time = '{}s'.format(round(time.time() - url_start_time, 2)) - # print('get_image_time: {}'.format(get_image_time)) pre_embedding_image = pre_embedding_image_url elif pre_embedding_image_raw: pre_embedding_image = pre_embedding_image_raw @@ -67,9 +57,6 @@ def adapt_insert(*args, **kwargs): image_embeddings = embedding_data_resp['image_embedding'] text_embeddings = embedding_data_resp['text_embeddings'] - print('image_embeddings: {}'.format(image_embeddings)) - print('text_embeddings: {}'.format(text_embeddings)) - if len(image_embeddings) > 0 and len(image_embeddings) > 0: # image_embedding = np.array(image_embeddings[0]) # text_embedding = text_embeddings[0] @@ -85,7 +72,6 @@ def adapt_insert(*args, **kwargs): mm_type = 'text' else: raise ValueError('maya embedding service return both empty list, please check!') - print('embedding_data: {}'.format(embedding_data)) chat_cache.data_manager.save( pre_embedding_text, pre_embedding_image_url, @@ -96,4 +82,4 @@ def adapt_insert(*args, **kwargs): mm_type=mm_type, extra_param=context.get("mm_save_func", None) ) - return 'success' \ No newline at end of file + return 'success' diff --git a/modelcache_mm/adapter/adapter_query.py b/modelcache_mm/adapter/adapter_query.py index 7eeeab5..776f076 100644 --- a/modelcache_mm/adapter/adapter_query.py +++ b/modelcache_mm/adapter/adapter_query.py @@ -1,8 +1,6 @@ # -*- coding: utf-8 -*- -import time -import requests +import logging import numpy as np -import base64 from modelcache_mm import cache from modelcache_mm.utils.error import NotInitError from modelcache_mm.utils.error import MultiTypeError @@ -37,12 +35,6 @@ def adapt_query(cache_data_convert, *args, **kwargs): raise ValueError( "Both pre_embedding_imageUrl and pre_embedding_imageRaw cannot be non-empty at the same time.") if pre_embedding_image_url: - # url_start_time = time.time() - # response = requests.get(pre_embedding_image_url) - # image_data = response.content - # pre_embedding_image = base64.b64encode(image_data).decode('utf-8') - # get_image_time = '{}s'.format(round(time.time() - url_start_time, 2)) - # print('get_image_time: {}'.format(get_image_time)) pre_embedding_image = pre_embedding_image_url elif pre_embedding_image_raw: pre_embedding_image = pre_embedding_image_raw @@ -50,12 +42,10 @@ def adapt_query(cache_data_convert, *args, **kwargs): raise ValueError( "Both 
pre_embedding_imageUrl and pre_embedding_imageRaw are empty. Please provide at least one.") data_dict = {'text': [pre_embedding_text], 'image': pre_embedding_image} - # print('data_dict: {}'.format(data_dict)) elif pre_multi_type == 'TEXT': data_dict = {'text': [pre_embedding_text], 'image': None} else: raise MultiTypeError - # print('data_dict: {}'.format(data_dict)) embedding_data = None mm_type = None @@ -75,14 +65,7 @@ def adapt_query(cache_data_convert, *args, **kwargs): image_embeddings = embedding_data_resp['image_embedding'] text_embeddings = embedding_data_resp['text_embeddings'] - print('image_embeddings: {}'.format(image_embeddings)) - print('image_embeddings_len: {}'.format(len(image_embeddings))) - print('text_embeddings: {}'.format(text_embeddings)) - print('text_embeddings_len: {}'.format(len(text_embeddings))) - if len(image_embeddings) > 0 and len(image_embeddings) > 0: - # image_embedding = np.array(image_embeddings[0]) - # text_embedding = np.array(text_embeddings[0]) embedding_data = np.concatenate((image_embeddings, text_embeddings)) mm_type = 'mm' elif len(image_embeddings) > 0: @@ -144,14 +127,10 @@ def adapt_query(cache_data_convert, *args, **kwargs): cache_data_dict, extra_param=context.get("evaluation_func", None), ) - - print('rank_pre: {}'.format(rank_pre)) - print('rank_threshold: {}'.format(rank_threshold)) if rank_pre < rank_threshold: return for cache_data in cache_data_list: - print('cache_data: {}'.format(cache_data)) primary_id = cache_data[1] ret = chat_cache.data_manager.get_scalar_data( cache_data, extra_param=context.get("get_scalar_data", None) @@ -189,9 +168,6 @@ def adapt_query(cache_data_convert, *args, **kwargs): eval_cache_data, extra_param=context.get("evaluation_func", None), ) - print('rank_threshold: {}'.format(rank_threshold)) - print('rank_threshold_long: {}'.format(rank_threshold_long)) - print('rank: {}'.format(rank)) if len(pre_embedding_text) <= 50: if rank_threshold <= rank: @@ -214,8 +190,6 @@ def adapt_query(cache_data_convert, *args, **kwargs): cache_questions = sorted(cache_questions, key=lambda x: x[0], reverse=True) cache_ids = sorted(cache_ids, key=lambda x: x[0], reverse=True) - print('cache_answers: {}'.format(cache_answers)) - if len(cache_answers) != 0: return_message = chat_cache.post_process_messages_func( [t[1] for t in cache_answers] @@ -236,7 +210,7 @@ def adapt_query(cache_data_convert, *args, **kwargs): try: chat_cache.data_manager.update_hit_count(return_id) except Exception: - print('update_hit_count except, please check!') + logging.warning('update_hit_count except, please check!') chat_cache.report.hint_cache() return_query_dict = {"image_url": return_image_url, "image_id": return_image_id, "question": return_query} diff --git a/modelcache_mm/adapter/adapter_register.py b/modelcache_mm/adapter/adapter_register.py index 934d9bb..8983748 100644 --- a/modelcache_mm/adapter/adapter_register.py +++ b/modelcache_mm/adapter/adapter_register.py @@ -9,8 +9,5 @@ def adapt_register(*args, **kwargs): if model is None or len(model) == 0: return ValueError('') - print('type: {}'.format(type)) - print('model: {}'.format(model)) register_resp = chat_cache.data_manager.create_index(model, type) - print('register_resp: {}'.format(register_resp)) return register_resp diff --git a/modelcache_mm/adapter/adapter_remove.py b/modelcache_mm/adapter/adapter_remove.py index 746a4ab..b6b1f92 100644 --- a/modelcache_mm/adapter/adapter_remove.py +++ b/modelcache_mm/adapter/adapter_remove.py @@ -16,7 +16,6 @@ def adapt_remove(*args, **kwargs): 
# delete data if remove_type == 'delete_by_id': id_list = kwargs.pop("id_list", []) - print('id_list: {}'.format(id_list)) resp = chat_cache.data_manager.delete(id_list, model=model) elif remove_type == 'truncate_by_model': resp = chat_cache.data_manager.truncate(model) diff --git a/modelcache_mm/embedding/clip.py b/modelcache_mm/embedding/clip.py index d718e43..d756f23 100644 --- a/modelcache_mm/embedding/clip.py +++ b/modelcache_mm/embedding/clip.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- -import os import torch from modelcache.embedding.base import BaseEmbedding from modelscope.utils.constant import Tasks @@ -7,12 +6,6 @@ from modelscope.preprocessors.image import load_image -# def mean_pooling(model_output, attention_mask): -# token_embeddings = model_output[0] # First element of model_output contains all token embeddings -# input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() -# return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) - - class ClipAudio(BaseEmbedding): def __init__(self, model: str = 'damo/multi-modal_clip-vit-base-patch16_zh'): self.model = model @@ -30,33 +23,17 @@ def to_embeddings(self, data_dict, **_): if image_data: input_img = load_image(image_data) - # 2D Tensor, [图片数, 特征维度] img_embedding = self.clip_pipeline.forward({'img': input_img})['img_embedding'].tolist()[0] if input_img else [] - print('img_embedding: {}'.format(img_embedding)) else: raise ValueError('image_data is None, please check!') if text_list and len(text_list) > 0: - # 2D Tensor, [文本数, 特征维度] text_embedding = self.clip_pipeline.forward({'text': text_list})['text_embedding'].tolist()[0] if text_list else [] - print('text_embedding: {}'.format(text_embedding)) else: raise ValueError('text_list is None, please check!') return {'image_embedding': img_embedding, 'text_embeddings': text_embedding} - # return {'image_embedding': img_feats, 'text_embeddings': txt_feats} - # input_texts = ["杰尼龟", "妙蛙种子", "小火龙", "皮卡丘"] - # input_img = load_image( - # 'https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg') - - # img_embedding = self.clip_pipeline.forward({'img': input_img})['img_embedding'] # 2D Tensor, [图片数, 特征维度] - # print('img_embedding: {}'.format(img_embedding)) - # text_embedding = self.clip_pipeline.forward({'text': input_texts})['text_embedding'] # 2D Tensor, [文本数, 特征维度] - - - # return embedding_array - def post_proc(self, token_embeddings, inputs): attention_mask = inputs["attention_mask"] input_mask_expanded = ( @@ -74,13 +51,3 @@ def dimension(self): :return: embedding dimension """ return self.__dimension - - -# if __name__ == '__main__': -# clip_vec = ClipAudio() -# text_list = ['hello', '你好'] -# text = ['###'.join(text_list)] -# image = 'https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg' -# data_dict = {'text': text, 'image': image} -# resp = clip_vec.to_embeddings(data_dict) -# print('resp: {}'.format(resp)) diff --git a/modelcache_mm/embedding/timm.py b/modelcache_mm/embedding/timm.py index 5241e03..e5c9f48 100644 --- a/modelcache_mm/embedding/timm.py +++ b/modelcache_mm/embedding/timm.py @@ -8,10 +8,10 @@ import_timm() import_pillow() -import torch # pylint: disable=C0413 -from timm.models import create_model # pylint: disable=C0413 -from timm.data import create_transform, resolve_data_config # pylint: disable=C0413 -from PIL import Image # pylint: disable=C0413 +import torch +from timm.models import create_model +from timm.data import create_transform, 
resolve_data_config +from PIL import Image class Timm(BaseEmbedding): @@ -26,7 +26,7 @@ def __init__(self, model: str = "resnet18", device: str = "default"): try: self.__dimension = self.model.embed_dim - except Exception: # pylint: disable=W0703 + except Exception: self.__dimension = None def to_embeddings(self, data, skip_preprocess: bool = False, **_): @@ -61,7 +61,6 @@ def preprocess(self, image_path): @property def dimension(self): """Embedding dimension. - :return: embedding dimension """ if not self.__dimension: diff --git a/modelcache_mm/manager/data_manager.py b/modelcache_mm/manager/data_manager.py index c6dae19..875f1a5 100644 --- a/modelcache_mm/manager/data_manager.py +++ b/modelcache_mm/manager/data_manager.py @@ -17,8 +17,6 @@ from modelcache_mm.utils.error import CacheError, ParamError from modelcache_mm.manager.vector_data.base import VectorBase, VectorData from modelcache_mm.manager.object_data.base import ObjectBase -# from modelcache.manager.eviction import EvictionBase -# from modelcache.manager.eviction_manager import EvictionManager from modelcache_mm.utils.log import modelcache_log @@ -91,9 +89,6 @@ def init(self): ) def save(self, text, image_url, image_id, answer, embedding, **kwargs): - # if isinstance(question, Question): - # question = question.content - # self.data[embedding_data] = (question, answer, embedding_data) pass def save_query_resp(self, query_resp_dict, **kwargs): @@ -103,10 +98,6 @@ def import_data( self, texts: List[Any], image_urls: List[Any], image_ids: List[Any], answers: List[Answer], embeddings: List[Any], model: Any, iat_type: Any ): - # if len(questions) != len(answers) or len(questions) != len(embedding_datas): - # raise ParamError("Make sure that all parameters have the same length") - # for i, embedding_data in enumerate(embedding_datas): - # self.data[embedding_data] = (questions[i], answers[i], embedding_datas[i]) pass def get_scalar_data(self, res_data, **kwargs) -> CacheData: @@ -163,17 +154,12 @@ def __init__( self.o = o def save(self, text, image_url, image_id, answer, embedding, **kwargs): - # model = kwargs.pop("model", None) - # self.import_data([question], [answer], [embedding_data], model) - model = kwargs.pop("model", None) mm_type = kwargs.pop("mm_type", None) self.import_data([text], [image_url], [image_id], [answer], [embedding], model, mm_type) def save_query_resp(self, query_resp_dict, **kwargs): - save_query_start_time = time.time() self.s.insert_query_resp(query_resp_dict, **kwargs) - save_query_delta_time = '{}s'.format(round(time.time() - save_query_start_time, 2)) def _process_answer_data(self, answers: Union[Answer, List[Answer]]): if isinstance(answers, Answer): @@ -198,38 +184,6 @@ def _process_question_data(self, question: Union[str, Question]): return Question(question) - # def import_data( - # self, questions: List[Any], answers: List[Answer], embedding_datas: List[Any], model: Any - # ): - # if len(questions) != len(answers) or len(questions) != len(embedding_datas): - # raise ParamError("Make sure that all parameters have the same length") - # cache_datas = [] - # - # embedding_datas = [ - # normalize(embedding_data) for embedding_data in embedding_datas - # ] - # - # for i, embedding_data in enumerate(embedding_datas): - # if self.o is not None: - # ans = self._process_answer_data(answers[i]) - # else: - # ans = answers[i] - # - # question = questions[i] - # embedding_data = embedding_data.astype("float32") - # cache_datas.append([ans, question, embedding_data, model]) - # - # ids = 
self.s.batch_insert(cache_datas) - # logging.info('ids: {}'.format(ids)) - # self.v.mul_add( - # [ - # VectorData(id=ids[i], data=embedding_data) - # for i, embedding_data in enumerate(embedding_datas) - # ], - # model - # - # ) - def import_data( self, texts: List[Any], image_urls: List[Any], image_ids: List[Any], answers: List[Answer], embeddings: List[Any], model: Any, mm_type: Any @@ -242,7 +196,6 @@ def import_data( normalize(text_embedding) for text_embedding in embeddings ] - # print('embedding_datas: {}'.format(embedding_datas)) for i, embedding in enumerate(embeddings): if self.o is not None: ans = self._process_answer_data(answers[i]) @@ -251,12 +204,9 @@ def import_data( text = texts[i] image_url = image_urls[i] image_id = image_ids[i] - # iat_embedding = embedding.astype("float32") cache_datas.append([ans, text, image_url, image_id, model]) - # ids = self.s.batch_multimodal_insert(cache_datas) ids = self.s.batch_insert(cache_datas) - # self.v.multimodal_add( self.v.add( [ VectorData(id=ids[i], data=embedding) @@ -266,12 +216,6 @@ def import_data( mm_type ) - # def get_scalar_data(self, res_data, **kwargs) -> Optional[CacheData]: - # cache_data = self.s.get_data_by_id(res_data[1]) - # if cache_data is None: - # return None - # return cache_data - def get_scalar_data(self, res_data, **kwargs) -> Optional[CacheData]: cache_data = self.s.get_data_by_id(res_data[1]) if cache_data is None: @@ -284,12 +228,6 @@ def update_hit_count(self, primary_id, **kwargs): def hit_cache_callback(self, res_data, **kwargs): self.eviction_base.get(res_data[1]) - # def search(self, embedding_data, **kwargs): - # model = kwargs.pop("model", None) - # embedding_data = normalize(embedding_data) - # top_k = kwargs.get("top_k", -1) - # return self.v.search(data=embedding_data, top_k=top_k, model=model) - def search(self, embedding_data, **kwargs): model = kwargs.pop("model", None) mm_type = kwargs.pop("mm_type", None) @@ -301,7 +239,7 @@ def search(self, embedding_data, **kwargs): try: message = str(e) if "no such index" in message: - print('no such index异常,创建索引...') + logging.info('no such index except,creating...') self.v.create(model, mm_type) search_result = self.v.search(data=embedding_data, top_k=top_k, model=model, mm_type=mm_type) except Exception as e: @@ -328,21 +266,6 @@ def create_index(self, model, type, **kwargs): return self.v.create(model, type) def truncate(self, model_name): - # # drop vector base data - # try: - # vector_resp = self.v.rebuild_col(model_name) - # except Exception as e: - # return {'status': 'failed', 'VectorDB': 'truncate VectorDB data failed, please check! e: {}'.format(e), - # 'ScalarDB': 'unexecuted'} - # if vector_resp: - # return {'status': 'failed', 'VectorDB': vector_resp, 'ScalarDB': 'unexecuted'} - # # drop scalar base data - # try: - # delete_count = self.s.model_deleted(model_name) - # except Exception as e: - # return {'status': 'failed', 'VectorDB': 'rebuild', - # 'ScalarDB': 'truncate scalar data failed, please check! 
e: {}'.format(e)} - # return {'status': 'success', 'VectorDB': 'rebuild', 'ScalarDB': 'delete_count: ' + str(delete_count)} try: resp = self.v.rebuild_idx(model_name) except Exception as e: @@ -350,9 +273,7 @@ def truncate(self, model_name): 'ScalarDB': 'unexecuted'} if resp: - print('resp: {}'.format(resp)) return {'status': 'failed', 'VectorDB': resp, 'ScalarDB': 'unexecuted'} - # drop ocean base model try: delete_count = self.s.model_deleted(model_name) except Exception as e: diff --git a/modelcache_mm/manager/eviction_manager.py b/modelcache_mm/manager/eviction_manager.py index 0a6a406..f64ee7b 100644 --- a/modelcache_mm/manager/eviction_manager.py +++ b/modelcache_mm/manager/eviction_manager.py @@ -1,14 +1,5 @@ # -*- coding: utf-8 -*- class EvictionManager: - """ - EvictionManager to manager the eviction policy. - - :param scalar_storage: CacheStorage to manager the scalar data. - :type scalar_storage: :class:`CacheStorage` - :param vector_base: VectorBase to manager the vector data. - :type vector_base: :class:`VectorBase` - """ - MAX_MARK_COUNT = 5000 MAX_MARK_RATE = 0.1 BATCH_SIZE = 100000 diff --git a/modelcache_mm/manager/scalar_data/sql_storage.py b/modelcache_mm/manager/scalar_data/sql_storage.py index fe7c7f1..c45f679 100644 --- a/modelcache_mm/manager/scalar_data/sql_storage.py +++ b/modelcache_mm/manager/scalar_data/sql_storage.py @@ -79,7 +79,6 @@ def _insert(self, data: List): finally: # Close the connection and return it to the connection pool. conn.close() - print('insert retrun id: {}'.format(id)) return id def batch_insert(self, all_data: List[CacheData]): @@ -139,16 +138,12 @@ def get_data_by_id(self, key: int): table_name = "open_cache_mm_answer" query_sql = "select question_text, image_url, image_id, answer, model from {} where id={}".format(table_name, key) conn = self.pool.connection() - search_start = time.time() try: with conn.cursor() as cursor: - # 执行数据库操作 cursor.execute(query_sql) resp = cursor.fetchone() finally: - # 关闭连接,将连接返回给连接池 conn.close() - print('ob_search_cost_time: {}'.format(time.time() - search_start)) if resp is not None and len(resp) == 5: return resp @@ -160,14 +155,11 @@ def update_hit_count_by_id(self, primary_id: int): update_sql = "UPDATE {} SET hit_count = hit_count+1 WHERE id={}".format(table_name, primary_id) conn = self.pool.connection() - # 使用连接执行更新数据操作 try: with conn.cursor() as cursor: - # 执行更新数据操作 cursor.execute(update_sql) conn.commit() finally: - # 关闭连接,将连接返回给连接池 conn.close() def get_ids(self, deleted=True): @@ -177,33 +169,25 @@ def mark_deleted(self, keys): table_name = "open_cache_mm_answer" delete_sql = "Delete from {} WHERE id in ({})".format(table_name, ",".join([str(i) for i in keys])) - # 从连接池中获取连接 conn = self.pool.connection() try: with conn.cursor() as cursor: - # 执行删除数据操作 cursor.execute(delete_sql) delete_count = cursor.rowcount conn.commit() finally: - # 关闭连接,将连接返回给连接池 conn.close() return delete_count def model_deleted(self, model_name): table_name = "open_cache_mm_answer" delete_sql = "Delete from {} WHERE model='{}'".format(table_name, model_name) - # print('delete_sql: {}'.format(delete_sql)) - # print('delete_sql begin') conn = self.pool.connection() - # 使用连接执行删除数据操作 try: with conn.cursor() as cursor: - # 执行删除数据操作 resp = cursor.execute(delete_sql) conn.commit() finally: - # 关闭连接,将连接返回给连接池 conn.close() return resp diff --git a/modelcache_mm/manager/scalar_data/sql_storage_sqlite.py b/modelcache_mm/manager/scalar_data/sql_storage_sqlite.py index 495fbf7..5d4d422 100644 --- 
a/modelcache_mm/manager/scalar_data/sql_storage_sqlite.py +++ b/modelcache_mm/manager/scalar_data/sql_storage_sqlite.py @@ -1,74 +1,10 @@ # -*- coding: utf-8 -*- -import os -import time - -import pymysql import json -import base64 from typing import List from modelcache.manager.scalar_data.base import CacheStorage, CacheData import sqlite3 -# def insert_single_data(conn, sql, data_tuple): -# cur = conn.cursor() -# try: -# cur.execute(sql, data_tuple) -# conn.commit() -# id = cur.lastrowid -# # print('id: {}'.format(id)) -# return id -# except Exception as e: -# print(e) -# conn.rollback() -# if cur: -# cur.close() -# -# -# def excute_sql(conn, sql): -# cur = conn.cursor() -# try: -# cur.execute(sql) -# conn.commit() -# except Exception as e: -# print(e) -# conn.rollback() -# if cur: -# cur.close() -# -# -# def excute_delete_sql(conn, sql): -# cur = conn.cursor() -# try: -# cur.execute(sql) -# row_count = cur.rowcount -# conn.commit() -# except Exception as e: -# print(e) -# conn.rollback() -# if cur: -# cur.close() -# return row_count -# -# -# def query_fetch_one_data(conn, sql): -# cursor = conn.cursor() -# try: -# cursor.execute(sql) -# except Exception as e: -# print(e) -# conn.rollback() -# rows = cursor.fetchone() -# if cursor: -# cursor.close() -# return rows -# -# -# def close(conn): -# if conn: -# conn.close() - - class SQLStorage(CacheStorage): def __init__( self, @@ -77,7 +13,6 @@ def __init__( url="./sqlite.db" ): self._url = url - # self._engine = sqlite3.connect(url) self.create() def create(self): diff --git a/modelcache_mm/manager/vector_data/manager.py b/modelcache_mm/manager/vector_data/manager.py index cc3f4b1..1c161cf 100644 --- a/modelcache_mm/manager/vector_data/manager.py +++ b/modelcache_mm/manager/vector_data/manager.py @@ -73,7 +73,6 @@ def get(name, **kwargs): redis_config = kwargs.get("redis_config") mm_dimension = kwargs.get("mm_dimension", DIMENSION) - print('mm_dimension: {}'.format(mm_dimension)) i_dimension = kwargs.get("i_dimension", DIMENSION) t_dimension = kwargs.get("t_dimension", DIMENSION) VectorBase.check_dimension(mm_dimension) diff --git a/modelcache_mm/manager/vector_data/redis.py b/modelcache_mm/manager/vector_data/redis.py index 9c0cb2e..38ccb88 100644 --- a/modelcache_mm/manager/vector_data/redis.py +++ b/modelcache_mm/manager/vector_data/redis.py @@ -61,7 +61,6 @@ def create_index(self, index_name, type, index_prefix): dimension = self.t_dimension else: raise ValueError('dimension type exception') - print('dimension: {}'.format(dimension)) if self._check_index_exists(index_name): modelcache_log.info( "The %s already exists, and it will be used directly", index_name @@ -134,7 +133,6 @@ def rebuild(self, ids=None) -> bool: def rebuild_idx(self, model, mm_type=None): for mm_type in ['IMG_TEXT', 'TEXT']: index_name = get_mm_index_name(model, mm_type) - print('remove index_name: {}'.format(index_name)) if self._check_index_exists(index_name): try: self._client.ft(index_name).dropindex(delete_documents=True) diff --git a/modelcache_mm/processor/pre.py b/modelcache_mm/processor/pre.py index 13bc8f4..f65515d 100644 --- a/modelcache_mm/processor/pre.py +++ b/modelcache_mm/processor/pre.py @@ -83,11 +83,9 @@ def multi_splicing(data_list) -> Any: def multi_analysis(dialog_str): sub_strings = dialog_str.split('|||') - dict_list = [] for s in sub_strings: parts = s.split('###') - if len(parts) == 2: role = parts[0] content = parts[1] @@ -96,14 +94,11 @@ def multi_analysis(dialog_str): content = '###'.join(parts[1:]) else: content = 'exception' - if content 
== '': d = {"role": role} else: d = {"role": role, "content": content} dict_list.append(d) - - # 3. 将每个字典添加到一个列表中,得到最终的列表 result_list = dict_list # 输出结果 @@ -111,7 +106,6 @@ def multi_analysis(dialog_str): def mm_insert_dict(data: Dict[str, Any], **_: Dict[str, Any]) -> Any: - print('chat_info: {}'.format(data.get("chat_info"))) query_dict = data.get("chat_info")[-1]['query'] return query_dict diff --git a/modelcache_mm/similarity_evaluation/exact_match.py b/modelcache_mm/similarity_evaluation/exact_match.py index 553bd59..2dcb554 100644 --- a/modelcache_mm/similarity_evaluation/exact_match.py +++ b/modelcache_mm/similarity_evaluation/exact_match.py @@ -4,7 +4,6 @@ class ExactMatchEvaluation(SimilarityEvaluation): - def __init__(self): pass diff --git a/modelcache_mm/utils/index_util.py b/modelcache_mm/utils/index_util.py index 48c1a8d..b7d1a55 100644 --- a/modelcache_mm/utils/index_util.py +++ b/modelcache_mm/utils/index_util.py @@ -10,7 +10,6 @@ def get_index_prefix(model): def get_mm_index_name(model, mm_type): - print('mm_type: {}'.format(mm_type)) if mm_type not in ['IMG_TEXT', 'mm', 'IMG', 'image', 'TEXT', 'text']: raise ValueError('mm_type is not normal!') if mm_type == 'IMG_TEXT': @@ -24,7 +23,6 @@ def get_mm_index_name(model, mm_type): def get_mm_index_prefix(model, mm_type): if mm_type not in ['IMG_TEXT', 'mm', 'IMG', 'image', 'TEXT', 'text']: - print('mm_type: {}'.format(mm_type)) raise ValueError('mm_type is not normal!') if mm_type == 'IMG_TEXT': mm_type = 'mm' diff --git a/modelcache_mm/utils/lazy_import.py b/modelcache_mm/utils/lazy_import.py index a0f0c6a..3f24eec 100644 --- a/modelcache_mm/utils/lazy_import.py +++ b/modelcache_mm/utils/lazy_import.py @@ -7,7 +7,6 @@ class LazyImport(ModuleType): """ Lazily import a module. """ - def __init__(self, local_name, parent_module_globals, name): self._local_name = local_name self._parent_module_globals = parent_module_globals diff --git a/modelcache_mm/utils/model_filter.py b/modelcache_mm/utils/model_filter.py deleted file mode 100644 index de38882..0000000 --- a/modelcache_mm/utils/model_filter.py +++ /dev/null @@ -1,15 +0,0 @@ -# -*- coding: utf-8 -*- -def model_blacklist_filter(model, request_type): - black_list = ['DI_COPILOT_SECOND', 'DI_COPILOT_LAB', 'DI_COPILOT_THIRD'] - result = None - if model in black_list: - if request_type == 'query': - result = {"errorCode": 105, - "errorDesc": "model: {} in blacklist".format(model), - "cacheHit": False, "delta_time": 0, "hit_query": '', "answer": ''} - elif request_type == 'insert': - result = {"errorCode": 305, "errorDesc": "model: {} in blacklist".format(model), "writeStatus": ""} - - return result - - From 2be4ed38f3cf2d7f7fd32a6151ae8ee1323f7f19 Mon Sep 17 00:00:00 2001 From: fuhui Date: Tue, 21 May 2024 11:18:43 +0800 Subject: [PATCH 37/98] add startup command --- README.md | 8 +++++++- README_CN.md | 11 ++++++++--- 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 5f608de..ec35b17 100644 --- a/README.md +++ b/README.md @@ -40,12 +40,18 @@ The project's startup scripts are divided into flask4modelcache.py and flask4mod - Python version: 3.8 and above - Package Installation ```shell -pip install requirements.txt +pip install -r requirements.txt ``` ### Service Startup #### Demo Service Startup 1. Download the embedding model bin file from the following address: [https://huggingface.co/shibing624/text2vec-base-chinese/tree/main](https://huggingface.co/shibing624/text2vec-base-chinese/tree/main). 
Place the downloaded bin file in the model/text2vec-base-chinese folder. 2. Start the backend service using the flask4modelcache_dome.py script. +```shell +cd CodeFuse-ModelCache +``` +```shell +python flask4modelcache_demo.py +``` #### Normal Service Startup Before starting the service, the following environment configurations should be performed: diff --git a/README_CN.md b/README_CN.md index bc4172a..8448e94 100644 --- a/README_CN.md +++ b/README_CN.md @@ -37,16 +37,21 @@ Codefuse-ModelCache 是一个开源的大模型语义缓存系统,通过缓存 - flask4modelcache_demo.py 为快速测试服务,内嵌了sqlite和faiss,用户无需关心数据库相关事宜。 - flask4modelcache.py 为正常服务,需用户具备mysql和milvus等数据库服务。 ### 环境依赖 - - python版本: 3.8及以上 - 依赖包安装: ```shell -pip install requirements.txt +pip install -r requirements.txt ``` ### 服务启动 #### Demo服务启动 - 离线模型bin文件下载, 参考地址:[https://huggingface.co/shibing624/text2vec-base-chinese/tree/main](https://huggingface.co/shibing624/text2vec-base-chinese/tree/main),并将下载的bin文件,放到 model/text2vec-base-chinese 文件夹中。 -- 执行flask4modelcache_demo.py脚本即可启动。 +- 执行flask4modelcache_demo.py启动服务。 +```shell +cd CodeFuse-ModelCache +``` +```shell +python flask4modelcache_demo.py +``` #### 正常服务启动 在启动服务前,应该进行如下环境配置: From 84cace3fc5d98f55e895f3d01f61add7ac91f966 Mon Sep 17 00:00:00 2001 From: fuhui Date: Tue, 21 May 2024 11:27:37 +0800 Subject: [PATCH 38/98] use markdown format for file paths --- README.md | 6 +++--- README_CN.md | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index ec35b17..934c3ee 100644 --- a/README.md +++ b/README.md @@ -55,11 +55,11 @@ python flask4modelcache_demo.py #### Normal Service Startup Before starting the service, the following environment configurations should be performed: -1. Install the relational database MySQL and import the SQL file to create the data tables. The SQL file can be found at: reference_doc/create_table.sql +1. Install the relational database MySQL and import the SQL file to create the data tables. The SQL file can be found at: ```reference_doc/create_table.sql``` 2. Install the vector database Milvus. 3. Add the database access information to the configuration files: - 1. modelcache/config/milvus_config.ini - 2. modelcache/config/mysql_config.ini + 1. ```modelcache/config/milvus_config.ini ``` + 2. ```modelcache/config/mysql_config.ini``` 4. Download the embedding model bin file from the following address: [https://huggingface.co/shibing624/text2vec-base-chinese/tree/main](https://huggingface.co/shibing624/text2vec-base-chinese/tree/main). Place the downloaded bin file in the model/text2vec-base-chinese folder. 5. Start the backend service using the flask4modelcache.py script. ## Service-Access diff --git a/README_CN.md b/README_CN.md index 8448e94..0a87c67 100644 --- a/README_CN.md +++ b/README_CN.md @@ -55,11 +55,11 @@ python flask4modelcache_demo.py #### 正常服务启动 在启动服务前,应该进行如下环境配置: -1. 安装关系数据库 mysql, 导入sql创建数据表,sql文件: reference_doc/create_table.sql +1. 安装关系数据库 mysql, 导入sql创建数据表,sql文件:```reference_doc/create_table.sql``` 2. 安装向量数据库milvus 3. 在配置文件中添加数据库访问信息,配置文件为: - 1. modelcache/config/milvus_config.ini - 2. modelcache/config/mysql_config.ini + 1. ```modelcache/config/milvus_config.ini``` + 2. ```modelcache/config/mysql_config.ini``` 4. 离线模型bin文件下载, 参考地址:[https://huggingface.co/shibing624/text2vec-base-chinese/tree/main](https://huggingface.co/shibing624/text2vec-base-chinese/tree/main),并将下载的bin文件,放到 model/text2vec-base-chinese 文件夹中 5. 
通过flask4modelcache.py脚本启动后端服务。 ## 服务访问 From bf7584af83f7f4fe582476b762f6e94ec0f9be46 Mon Sep 17 00:00:00 2001 From: fuhui Date: Tue, 21 May 2024 21:39:00 +0800 Subject: [PATCH 39/98] add flask multi cache demo --- flask4multicache.py | 191 ++++++++++++++++++++++++++++++++++++ flask4multicache_demo.py | 204 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 395 insertions(+) create mode 100644 flask4multicache.py create mode 100644 flask4multicache_demo.py diff --git a/flask4multicache.py b/flask4multicache.py new file mode 100644 index 0000000..ee5955b --- /dev/null +++ b/flask4multicache.py @@ -0,0 +1,191 @@ +# -*- coding: utf-8 -*- +import time +from flask import Flask, request +import logging +import configparser +import json +from modelcache import cache +from modelcache.adapter import adapter +from modelcache.manager import CacheBase, VectorBase, get_data_manager +from modelcache.similarity_evaluation.distance import SearchDistanceEvaluation +from modelcache.processor.pre import query_multi_splicing +from modelcache.processor.pre import insert_multi_splicing +from concurrent.futures import ThreadPoolExecutor +from modelcache.utils.model_filter import model_blacklist_filter +from modelcache.embedding import Data2VecAudio + +# 创建一个Flask实例 +app = Flask(__name__) + + +def response_text(cache_resp): + return cache_resp['data'] + + +def save_query_info(result, model, query, delta_time_log): + cache.data_manager.save_query_resp(result, model=model, query=json.dumps(query, ensure_ascii=False), + delta_time=delta_time_log) + + +def response_hitquery(cache_resp): + return cache_resp['hitQuery'] + + +data2vec = Data2VecAudio() +mysql_config = configparser.ConfigParser() +mysql_config.read('modelcache/config/mysql_config.ini') + +milvus_config = configparser.ConfigParser() +milvus_config.read('modelcache/config/milvus_config.ini') + +# redis_config = configparser.ConfigParser() +# redis_config.read('modelcache/config/redis_config.ini') + + +data_manager = get_data_manager(CacheBase("mysql", config=mysql_config), + VectorBase("milvus", dimension=data2vec.dimension, milvus_config=milvus_config)) + +# data_manager = get_data_manager(CacheBase("mysql", config=mysql_config), +# VectorBase("redis", dimension=data2vec.dimension, redis_config=redis_config)) + + +cache.init( + embedding_func=data2vec.to_embeddings, + data_manager=data_manager, + similarity_evaluation=SearchDistanceEvaluation(), + query_pre_embedding_func=query_multi_splicing, + insert_pre_embedding_func=insert_multi_splicing, +) + +global executor +executor = ThreadPoolExecutor(max_workers=6) + + +@app.route('/welcome') +def first_flask(): # 视图函数 + return 'hello, modelcache!' 
+ + +@app.route('/modelcache', methods=['GET', 'POST']) +def user_backend(): + try: + if request.method == 'POST': + request_data = request.json + elif request.method == 'GET': + request_data = request.args + param_dict = json.loads(request_data) + except Exception as e: + result = {"errorCode": 101, "errorDesc": str(e), "cacheHit": False, "delta_time": 0, "hit_query": '', + "answer": ''} + cache.data_manager.save_query_resp(result, model='', query='', delta_time=0) + return json.dumps(result) + + # param parsing + try: + request_type = param_dict.get("type") + + scope = param_dict.get("scope") + if scope is not None: + model = scope.get('model') + model = model.replace('-', '_') + model = model.replace('.', '_') + query = param_dict.get("query") + chat_info = param_dict.get("chat_info") + if request_type is None or request_type not in ['query', 'insert', 'remove', 'register']: + result = {"errorCode": 102, + "errorDesc": "type exception, should one of ['query', 'insert', 'remove', 'register']", + "cacheHit": False, "delta_time": 0, "hit_query": '', "answer": ''} + cache.data_manager.save_query_resp(result, model=model, query='', delta_time=0) + return json.dumps(result) + except Exception as e: + result = {"errorCode": 103, "errorDesc": str(e), "cacheHit": False, "delta_time": 0, "hit_query": '', + "answer": ''} + return json.dumps(result) + + # model filter + filter_resp = model_blacklist_filter(model, request_type) + if isinstance(filter_resp, dict): + return json.dumps(filter_resp) + + if request_type == 'query': + try: + start_time = time.time() + response = adapter.ChatCompletion.create_query( + scope={"model": model}, + query=query + ) + delta_time = '{}s'.format(round(time.time() - start_time, 2)) + if response is None: + result = {"errorCode": 0, "errorDesc": '', "cacheHit": False, "delta_time": delta_time, "hit_query": '', + "answer": ''} + # elif response in ['adapt_query_exception']: + elif isinstance(response, str): + result = {"errorCode": 201, "errorDesc": response, "cacheHit": False, "delta_time": delta_time, + "hit_query": '', "answer": ''} + else: + answer = response_text(response) + hit_query = response_hitquery(response) + result = {"errorCode": 0, "errorDesc": '', "cacheHit": True, "delta_time": delta_time, + "hit_query": hit_query, "answer": answer} + delta_time_log = round(time.time() - start_time, 2) + future = executor.submit(save_query_info, result, model, query, delta_time_log) + except Exception as e: + result = {"errorCode": 202, "errorDesc": str(e), "cacheHit": False, "delta_time": 0, + "hit_query": '', "answer": ''} + logging.info('result: {}'.format(result)) + + return json.dumps(result, ensure_ascii=False) + + if request_type == 'insert': + try: + try: + response = adapter.ChatCompletion.create_insert( + model=model, + chat_info=chat_info + ) + except Exception as e: + result = {"errorCode": 302, "errorDesc": str(e), "writeStatus": "exception"} + return json.dumps(result, ensure_ascii=False) + + if response == 'success': + result = {"errorCode": 0, "errorDesc": "", "writeStatus": "success"} + else: + result = {"errorCode": 301, "errorDesc": response, "writeStatus": "exception"} + return json.dumps(result, ensure_ascii=False) + except Exception as e: + result = {"errorCode": 303, "errorDesc": str(e), "writeStatus": "exception"} + return json.dumps(result, ensure_ascii=False) + + if request_type == 'remove': + remove_type = param_dict.get("remove_type") + id_list = param_dict.get("id_list", []) + + response = adapter.ChatCompletion.create_remove( + 
model=model, + remove_type=remove_type, + id_list=id_list + ) + if not isinstance(response, dict): + result = {"errorCode": 401, "errorDesc": "", "response": response, "removeStatus": "exception"} + return json.dumps(result) + + state = response.get('status') + if state == 'success': + result = {"errorCode": 0, "errorDesc": "", "response": response, "writeStatus": "success"} + else: + result = {"errorCode": 402, "errorDesc": "", "response": response, "writeStatus": "exception"} + return json.dumps(result) + + if request_type == 'register': + response = adapter.ChatCompletion.create_register( + model=model + ) + if response in ['create_success', 'already_exists']: + result = {"errorCode": 0, "errorDesc": "", "response": response, "writeStatus": "success"} + else: + result = {"errorCode": 502, "errorDesc": "", "response": response, "writeStatus": "exception"} + return json.dumps(result) + + +if __name__ == '__main__': + app.run(host='0.0.0.0', port=5000, debug=True) diff --git a/flask4multicache_demo.py b/flask4multicache_demo.py new file mode 100644 index 0000000..a55b94f --- /dev/null +++ b/flask4multicache_demo.py @@ -0,0 +1,204 @@ +# -*- coding: utf-8 -*- +import time +from flask import Flask, request +import logging +import json +from modelcache import cache +from modelcache.adapter import adapter +from modelcache.manager import CacheBase, VectorBase, get_data_manager +from modelcache.similarity_evaluation.distance import SearchDistanceEvaluation +from modelcache.processor.pre import query_multi_splicing +from modelcache.processor.pre import insert_multi_splicing +from concurrent.futures import ThreadPoolExecutor +from modelcache.utils.model_filter import model_blacklist_filter +from modelcache.embedding import Data2VecAudio +from modelcache_mm.processor.pre import mm_insert_dict +from modelcache_mm.processor.pre import mm_query_dict +from modelcache_mm.embedding import Clip2Vec + +# 创建一个Flask实例 +app = Flask(__name__) + + +def response_text(cache_resp): + return cache_resp['data'] + + +def save_query_info(result, model, query, delta_time_log): + cache.data_manager.save_query_resp(result, model=model, query=json.dumps(query, ensure_ascii=False), + delta_time=delta_time_log) + + +def response_hitquery(cache_resp): + return cache_resp['hitQuery'] + + +# data2vec = Data2VecAudio() +# data_manager = get_data_manager(CacheBase("sqlite"), VectorBase("faiss", dimension=data2vec.dimension)) + +image_dimension = 512 +text_dimension = 512 +clip2vec = Clip2Vec() +data_manager = get_data_manager(CacheBase("sqlite"), VectorBase("faiss", + mm_dimension=image_dimension+text_dimension, + i_dimension=image_dimension, + t_dimension=text_dimension)) + + +cache.init( + embedding_func=clip2vec.to_embeddings, + data_manager=data_manager, + similarity_evaluation=SearchDistanceEvaluation(), + insert_pre_embedding_func=mm_insert_dict, + query_pre_embedding_func=mm_query_dict, + ) + +# cache.set_openai_key() +global executor +executor = ThreadPoolExecutor(max_workers=6) + + +@app.route('/welcome') +def first_flask(): # 视图函数 + return 'hello, multicache!' 
+ + +@app.route('/multicache', methods=['GET', 'POST']) +def user_backend(): + try: + if request.method == 'POST': + request_data = request.json + elif request.method == 'GET': + request_data = request.args + param_dict = json.loads(request_data) + except Exception as e: + result = {"errorCode": 301, "errorDesc": str(e), "cacheHit": False, "delta_time": 0, "hit_query": '', + "answer": ''} + cache.data_manager.save_query_resp(result, model='', query='', delta_time=0) + return json.dumps(result) + + # param parsing + try: + request_type = param_dict.get("request_type") + scope = param_dict.get("scope") + if scope is not None: + model = scope.get('model') + model = model.replace('-', '_') + model = model.replace('.', '_') + + if request_type in ['query', 'insert']: + if request_type == 'query': + query = param_dict.get("query") + elif request_type == 'insert': + chat_info = param_dict.get("chat_info") + query = chat_info[-1]['query'] + + if request_type is None or request_type not in ['query', 'remove', 'insert', 'register']: + result = {"errorCode": 102, + "errorDesc": "type exception, should one of ['query', 'insert', 'remove', 'register']", + "cacheHit": False, "delta_time": 0, "hit_query": '', "answer": ''} + cache.data_manager.save_query_resp(result, model=model, query='', delta_time=0) + return json.dumps(result) + except Exception as e: + result = {"errorCode": 103, "errorDesc": str(e), "cacheHit": False, "delta_time": 0, "hit_query": '', + "answer": ''} + return json.dumps(result) + + # model filter + # filter_resp = model_blacklist_filter(model, request_type) + # if isinstance(filter_resp, dict): + # return json.dumps(filter_resp) + + if request_type == 'query': + # if UUID: + # try: + # uuid_list = UUID.split('==>') + # except Exception as e: + try: + start_time = time.time() + response = adapter.ChatCompletion.create_query( + scope={"model": model}, + query=query, + ) + delta_time = '{}s'.format(round(time.time() - start_time, 2)) + if response is None: + result = {"errorCode": 0, "errorDesc": '', "cacheHit": False, "delta_time": delta_time, + "hit_query": '', "answer": ''} + elif isinstance(response, dict): + answer = response_text(response) + hit_query = response_hitquery(response) + result = {"errorCode": 0, "errorDesc": '', "cacheHit": True, "delta_time": delta_time, + "hit_query": hit_query, "answer": answer} + else: + result = {"errorCode": 201, "errorDesc": response, "cacheHit": False, "delta_time": delta_time, + "hit_query": '', "answer": ''} + delta_time_log = round(time.time() - start_time, 3) + + future = executor.submit(save_query_info, result, model, query, delta_time_log) + # query_time = round(time.time() - start_time, 2) + except Exception as e: + raise e + return json.dumps(result, ensure_ascii=False) + + if request_type == 'insert': + # if UUID: + # try: + # uuid_list = UUID.split('==>') + # except Exception as e: + try: + start_time = time.time() + try: + response = adapter.ChatCompletion.create_insert( + model=model, + chat_info=chat_info, + ) + except Exception as e: + raise e + + if response == 'success': + result = {"errorCode": 0, "errorDesc": "", "writeStatus": "success"} + else: + result = {"errorCode": 301, "errorDesc": response, "writeStatus": "exception"} + insert_time = round(time.time() - start_time, 2) + return json.dumps(result, ensure_ascii=False) + except Exception as e: + raise e + + if request_type == 'remove': + remove_type = param_dict.get("remove_type") + id_list = param_dict.get("id_list", []) + + response = 
adapter.ChatCompletion.create_remove( + model=model, + remove_type=remove_type, + id_list=id_list + ) + + if not isinstance(response, dict): + result = {"errorCode": 401, "errorDesc": "", "response": response, "removeStatus": "exception"} + return json.dumps(result) + + state = response.get('status') + # if response == 'success': + if state == 'success': + result = {"errorCode": 0, "errorDesc": "", "response": response, "writeStatus": "success"} + else: + result = {"errorCode": 402, "errorDesc": "", "response": response, "writeStatus": "exception"} + return json.dumps(result) + + if request_type == 'register': + type = param_dict.get("type") + response = adapter.ChatCompletion.create_register( + model=model, + type=type + ) + if response in ['create_success', 'already_exists']: + result = {"errorCode": 0, "errorDesc": "", "response": response, "writeStatus": "success"} + else: + result = {"errorCode": 502, "errorDesc": "", "response": response, "writeStatus": "exception"} + return json.dumps(result) + + +if __name__ == '__main__': + # app.run(host='0.0.0.0', port=5000, debug=True) + app.run(host='0.0.0.0', port=5000) From d5225600f3edf4eb6b28cee54afb4db7039ded34 Mon Sep 17 00:00:00 2001 From: fuhui Date: Wed, 22 May 2024 11:08:37 +0800 Subject: [PATCH 40/98] add flask4multicache service --- examples/__init__.py | 11 ++ examples/flask/llms_cache/__init__.py | 12 ++ .../flask/{ => llms_cache}/data_insert.py | 0 examples/flask/{ => llms_cache}/data_query.py | 0 .../flask/{ => llms_cache}/data_query_long.py | 0 examples/flask/{ => llms_cache}/register.py | 0 examples/flask/multi_cache/__init__.py | 12 ++ examples/flask/multi_cache/data_insert.py | 31 +++++ examples/flask/multi_cache/data_query.py | 28 +++++ examples/flask/multi_cache/register.py | 22 ++++ examples/flask/multi_cache/remove.py | 23 ++++ flask4modelcache_demo.py | 1 - flask4multicache.py | 115 ++++++++++-------- flask4multicache_demo.py | 34 ++---- .../manager/scalar_data/sql_storage.py | 1 - modelcache_mm/utils/index_util.py | 2 +- 16 files changed, 211 insertions(+), 81 deletions(-) create mode 100644 examples/flask/llms_cache/__init__.py rename examples/flask/{ => llms_cache}/data_insert.py (100%) rename examples/flask/{ => llms_cache}/data_query.py (100%) rename examples/flask/{ => llms_cache}/data_query_long.py (100%) rename examples/flask/{ => llms_cache}/register.py (100%) create mode 100644 examples/flask/multi_cache/__init__.py create mode 100644 examples/flask/multi_cache/data_insert.py create mode 100644 examples/flask/multi_cache/data_query.py create mode 100644 examples/flask/multi_cache/register.py create mode 100644 examples/flask/multi_cache/remove.py diff --git a/examples/__init__.py b/examples/__init__.py index 40a96af..0705126 100644 --- a/examples/__init__.py +++ b/examples/__init__.py @@ -1 +1,12 @@ # -*- coding: utf-8 -*- +""" + Alipay.com Inc. + Copyright (c) 2004-2023 All Rights Reserved. + ------------------------------------------------------ + File Name : __init__.py.py + Author : fuhui.phe + Create Time : 2024/5/22 11:03 + Description : description what the main function of this file + Change Activity: + version0 : 2024/5/22 11:03 by fuhui.phe init +""" diff --git a/examples/flask/llms_cache/__init__.py b/examples/flask/llms_cache/__init__.py new file mode 100644 index 0000000..f433d63 --- /dev/null +++ b/examples/flask/llms_cache/__init__.py @@ -0,0 +1,12 @@ +# -*- coding: utf-8 -*- +""" + Alipay.com Inc. + Copyright (c) 2004-2023 All Rights Reserved. 
+ ------------------------------------------------------ + File Name : __init__.py.py + Author : fuhui.phe + Create Time : 2024/5/22 10:28 + Description : description what the main function of this file + Change Activity: + version0 : 2024/5/22 10:28 by fuhui.phe init +""" diff --git a/examples/flask/data_insert.py b/examples/flask/llms_cache/data_insert.py similarity index 100% rename from examples/flask/data_insert.py rename to examples/flask/llms_cache/data_insert.py diff --git a/examples/flask/data_query.py b/examples/flask/llms_cache/data_query.py similarity index 100% rename from examples/flask/data_query.py rename to examples/flask/llms_cache/data_query.py diff --git a/examples/flask/data_query_long.py b/examples/flask/llms_cache/data_query_long.py similarity index 100% rename from examples/flask/data_query_long.py rename to examples/flask/llms_cache/data_query_long.py diff --git a/examples/flask/register.py b/examples/flask/llms_cache/register.py similarity index 100% rename from examples/flask/register.py rename to examples/flask/llms_cache/register.py diff --git a/examples/flask/multi_cache/__init__.py b/examples/flask/multi_cache/__init__.py new file mode 100644 index 0000000..b003444 --- /dev/null +++ b/examples/flask/multi_cache/__init__.py @@ -0,0 +1,12 @@ +# -*- coding: utf-8 -*- +""" + Alipay.com Inc. + Copyright (c) 2004-2023 All Rights Reserved. + ------------------------------------------------------ + File Name : __init__.py.py + Author : fuhui.phe + Create Time : 2024/5/22 10:29 + Description : description what the main function of this file + Change Activity: + version0 : 2024/5/22 10:29 by fuhui.phe init +""" diff --git a/examples/flask/multi_cache/data_insert.py b/examples/flask/multi_cache/data_insert.py new file mode 100644 index 0000000..3d173b8 --- /dev/null +++ b/examples/flask/multi_cache/data_insert.py @@ -0,0 +1,31 @@ +# -*- coding: utf-8 -*- +import time +import json +import uuid +import requests + + +def run(): + url = 'http://127.0.0.1:5000/multicache' + + request_type = 'insert' + scope = {"model": "multimodal_test"} + # UUID = "820b0052-d9d8-11ee-95f1-52775e3e6fd1" + "==>" + str(time.time()) + UUID = str(uuid.uuid1()) + "==>" + str(time.time()) + img_data = "https://img0.baidu.com/it/u=1436460262,4166266890&fm=253&fmt=auto&app=138&f=JPEG?w=500&h=282" + query = {'text': ['父母带着孩子来这个地方可能会有什么顾虑'], + 'imageRaw': '', + 'imageUrl': img_data, + 'imageId': 'ccc'} + answer = "应该注意小孩不要跑到铁轨上" + chat_info = [{"query": query, "answer": answer}] + data_dict = {'request_type': request_type, 'scope': scope, 'chat_info': chat_info, 'UUID': UUID} + + headers = {"Content-Type": "application/json"} + res = requests.post(url, headers=headers, json=json.dumps(data_dict)) + res_text = res.text + print('res_text: {}'.format(res_text)) + + +if __name__ == '__main__': + run() diff --git a/examples/flask/multi_cache/data_query.py b/examples/flask/multi_cache/data_query.py new file mode 100644 index 0000000..ccfc335 --- /dev/null +++ b/examples/flask/multi_cache/data_query.py @@ -0,0 +1,28 @@ +# -*- coding: utf-8 -*- +import json +import requests +import uuid +import time + + +def run(): + url = 'http://127.0.0.1:5000/multicache' + request_type = 'query' + UUID = str(uuid.uuid1()) + "==>" + str(time.time()) + scope = {"model": "multimodal_test"} + img_data = "https://img0.baidu.com/it/u=1436460262,4166266890&fm=253&fmt=auto&app=138&f=JPEG?w=500&h=282" + query = {'text': ['父母带着孩子来这个地方可能会有什么顾虑'], + 'imageRaw': '', + 'imageUrl': img_data, + 'multiType': 'IMG_TEXT'} + + data = 
{'request_type': request_type, 'scope': scope, 'query': query, 'UUID': UUID} + + headers = {"Content-Type": "application/json"} + res = requests.post(url, headers=headers, json=json.dumps(data)) + res_text = res.text + print('res_text: {}'.format(res_text)) + + +if __name__ == '__main__': + run() diff --git a/examples/flask/multi_cache/register.py b/examples/flask/multi_cache/register.py new file mode 100644 index 0000000..4ca830b --- /dev/null +++ b/examples/flask/multi_cache/register.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +""" +register index for redis +""" +import json +import requests + + +def run(): + url = 'http://127.0.0.1:5000/multicache' + request_type = 'register' + scope = {"model": "multimodal_test"} + type = 'IMG_TEXT' + data = {'request_type': request_type, 'scope': scope, 'type': type} + headers = {"Content-Type": "application/json"} + res = requests.post(url, headers=headers, json=json.dumps(data)) + res_text = res.text + print('res_text: {}'.format(res_text)) + + +if __name__ == '__main__': + run() \ No newline at end of file diff --git a/examples/flask/multi_cache/remove.py b/examples/flask/multi_cache/remove.py new file mode 100644 index 0000000..ffad449 --- /dev/null +++ b/examples/flask/multi_cache/remove.py @@ -0,0 +1,23 @@ +# -*- coding: utf-8 -*- +""" +register index for redis +""" +import json +import requests + + +def run(): + url = 'http://127.0.0.1:5000/multicache' + request_type = 'remove' + scope = {"model": "multimodal_test"} + remove_type = 'truncate_by_model' + data = {'request_type': request_type, 'scope': scope, 'remove_type': remove_type} + + headers = {"Content-Type": "application/json"} + res = requests.post(url, headers=headers, json=json.dumps(data)) + res_text = res.text + print('res_text: {}'.format(res_text)) + + +if __name__ == '__main__': + run() diff --git a/flask4modelcache_demo.py b/flask4modelcache_demo.py index dc163b5..54b9e8e 100644 --- a/flask4modelcache_demo.py +++ b/flask4modelcache_demo.py @@ -117,7 +117,6 @@ def user_backend(): result = {"errorCode": 202, "errorDesc": e, "cacheHit": False, "delta_time": 0, "hit_query": '', "answer": ''} logging.info('result: {}'.format(result)) - return json.dumps(result, ensure_ascii=False) if request_type == 'insert': diff --git a/flask4multicache.py b/flask4multicache.py index ee5955b..f59cf1f 100644 --- a/flask4multicache.py +++ b/flask4multicache.py @@ -1,18 +1,20 @@ -# -*- coding: utf-8 -*- import time from flask import Flask, request import logging -import configparser import json -from modelcache import cache -from modelcache.adapter import adapter -from modelcache.manager import CacheBase, VectorBase, get_data_manager -from modelcache.similarity_evaluation.distance import SearchDistanceEvaluation -from modelcache.processor.pre import query_multi_splicing -from modelcache.processor.pre import insert_multi_splicing +import configparser from concurrent.futures import ThreadPoolExecutor -from modelcache.utils.model_filter import model_blacklist_filter -from modelcache.embedding import Data2VecAudio +from modelcache_mm import cache +from modelcache_mm.adapter import adapter +from modelcache_mm.manager import CacheBase, VectorBase, get_data_manager +from modelcache_mm.similarity_evaluation.distance import SearchDistanceEvaluation +# from modelcache.processor.pre import query_multi_splicing +# from modelcache.processor.pre import insert_multi_splicing +# from modelcache.utils.model_filter import model_blacklist_filter +# from modelcache.embedding import Data2VecAudio +from 
modelcache_mm.processor.pre import mm_insert_dict +from modelcache_mm.processor.pre import mm_query_dict +from modelcache_mm.embedding import Clip2Vec # 创建一个Flask实例 app = Flask(__name__) @@ -31,31 +33,35 @@ def response_hitquery(cache_resp): return cache_resp['hitQuery'] -data2vec = Data2VecAudio() +# data2vec = Data2VecAudio() mysql_config = configparser.ConfigParser() mysql_config.read('modelcache/config/mysql_config.ini') -milvus_config = configparser.ConfigParser() -milvus_config.read('modelcache/config/milvus_config.ini') +# milvus_config = configparser.ConfigParser() +# milvus_config.read('modelcache/config/milvus_config.ini') -# redis_config = configparser.ConfigParser() -# redis_config.read('modelcache/config/redis_config.ini') +redis_config = configparser.ConfigParser() +redis_config.read('modelcache/config/redis_config.ini') +image_dimension = 512 +text_dimension = 512 +clip2vec = Clip2Vec() data_manager = get_data_manager(CacheBase("mysql", config=mysql_config), - VectorBase("milvus", dimension=data2vec.dimension, milvus_config=milvus_config)) - + VectorBase("redis", mm_dimension=image_dimension+text_dimension, + i_dimension=image_dimension, t_dimension=text_dimension, + redis_config=redis_config)) # data_manager = get_data_manager(CacheBase("mysql", config=mysql_config), # VectorBase("redis", dimension=data2vec.dimension, redis_config=redis_config)) cache.init( - embedding_func=data2vec.to_embeddings, - data_manager=data_manager, - similarity_evaluation=SearchDistanceEvaluation(), - query_pre_embedding_func=query_multi_splicing, - insert_pre_embedding_func=insert_multi_splicing, -) + embedding_func=clip2vec.to_embeddings, + data_manager=data_manager, + similarity_evaluation=SearchDistanceEvaluation(), + insert_pre_embedding_func=mm_insert_dict, + query_pre_embedding_func=mm_query_dict, + ) global executor executor = ThreadPoolExecutor(max_workers=6) @@ -63,10 +69,10 @@ def response_hitquery(cache_resp): @app.route('/welcome') def first_flask(): # 视图函数 - return 'hello, modelcache!' + return 'hello, llms_cache!' 
-@app.route('/modelcache', methods=['GET', 'POST']) +@app.route('/multicache', methods=['GET', 'POST']) def user_backend(): try: if request.method == 'POST': @@ -75,23 +81,28 @@ def user_backend(): request_data = request.args param_dict = json.loads(request_data) except Exception as e: - result = {"errorCode": 101, "errorDesc": str(e), "cacheHit": False, "delta_time": 0, "hit_query": '', + result = {"errorCode": 301, "errorDesc": str(e), "cacheHit": False, "delta_time": 0, "hit_query": '', "answer": ''} cache.data_manager.save_query_resp(result, model='', query='', delta_time=0) return json.dumps(result) # param parsing try: - request_type = param_dict.get("type") - + request_type = param_dict.get("request_type") scope = param_dict.get("scope") if scope is not None: model = scope.get('model') model = model.replace('-', '_') model = model.replace('.', '_') - query = param_dict.get("query") - chat_info = param_dict.get("chat_info") - if request_type is None or request_type not in ['query', 'insert', 'remove', 'register']: + + if request_type in ['query', 'insert']: + if request_type == 'query': + query = param_dict.get("query") + elif request_type == 'insert': + chat_info = param_dict.get("chat_info") + query = chat_info[-1]['query'] + + if request_type is None or request_type not in ['query', 'remove', 'insert', 'register']: result = {"errorCode": 102, "errorDesc": "type exception, should one of ['query', 'insert', 'remove', 'register']", "cacheHit": False, "delta_time": 0, "hit_query": '', "answer": ''} @@ -102,59 +113,51 @@ def user_backend(): "answer": ''} return json.dumps(result) - # model filter - filter_resp = model_blacklist_filter(model, request_type) - if isinstance(filter_resp, dict): - return json.dumps(filter_resp) - if request_type == 'query': try: start_time = time.time() response = adapter.ChatCompletion.create_query( scope={"model": model}, - query=query + query=query, ) delta_time = '{}s'.format(round(time.time() - start_time, 2)) if response is None: - result = {"errorCode": 0, "errorDesc": '', "cacheHit": False, "delta_time": delta_time, "hit_query": '', - "answer": ''} - # elif response in ['adapt_query_exception']: - elif isinstance(response, str): - result = {"errorCode": 201, "errorDesc": response, "cacheHit": False, "delta_time": delta_time, + result = {"errorCode": 0, "errorDesc": '', "cacheHit": False, "delta_time": delta_time, "hit_query": '', "answer": ''} - else: + elif isinstance(response, dict): answer = response_text(response) hit_query = response_hitquery(response) result = {"errorCode": 0, "errorDesc": '', "cacheHit": True, "delta_time": delta_time, "hit_query": hit_query, "answer": answer} - delta_time_log = round(time.time() - start_time, 2) + else: + result = {"errorCode": 201, "errorDesc": response, "cacheHit": False, "delta_time": delta_time, + "hit_query": '', "answer": ''} + delta_time_log = round(time.time() - start_time, 3) + future = executor.submit(save_query_info, result, model, query, delta_time_log) except Exception as e: - result = {"errorCode": 202, "errorDesc": str(e), "cacheHit": False, "delta_time": 0, - "hit_query": '', "answer": ''} - logging.info('result: {}'.format(result)) - + raise e return json.dumps(result, ensure_ascii=False) if request_type == 'insert': try: + start_time = time.time() try: response = adapter.ChatCompletion.create_insert( model=model, - chat_info=chat_info + chat_info=chat_info, ) except Exception as e: - result = {"errorCode": 302, "errorDesc": str(e), "writeStatus": "exception"} - return json.dumps(result, 
ensure_ascii=False) + raise e if response == 'success': result = {"errorCode": 0, "errorDesc": "", "writeStatus": "success"} else: result = {"errorCode": 301, "errorDesc": response, "writeStatus": "exception"} + insert_time = round(time.time() - start_time, 2) return json.dumps(result, ensure_ascii=False) except Exception as e: - result = {"errorCode": 303, "errorDesc": str(e), "writeStatus": "exception"} - return json.dumps(result, ensure_ascii=False) + raise e if request_type == 'remove': remove_type = param_dict.get("remove_type") @@ -165,11 +168,13 @@ def user_backend(): remove_type=remove_type, id_list=id_list ) + if not isinstance(response, dict): result = {"errorCode": 401, "errorDesc": "", "response": response, "removeStatus": "exception"} return json.dumps(result) state = response.get('status') + # if response == 'success': if state == 'success': result = {"errorCode": 0, "errorDesc": "", "response": response, "writeStatus": "success"} else: @@ -177,8 +182,10 @@ def user_backend(): return json.dumps(result) if request_type == 'register': + type = param_dict.get("type") response = adapter.ChatCompletion.create_register( - model=model + model=model, + type=type ) if response in ['create_success', 'already_exists']: result = {"errorCode": 0, "errorDesc": "", "response": response, "writeStatus": "success"} diff --git a/flask4multicache_demo.py b/flask4multicache_demo.py index a55b94f..3f205fd 100644 --- a/flask4multicache_demo.py +++ b/flask4multicache_demo.py @@ -3,15 +3,15 @@ from flask import Flask, request import logging import json -from modelcache import cache -from modelcache.adapter import adapter -from modelcache.manager import CacheBase, VectorBase, get_data_manager -from modelcache.similarity_evaluation.distance import SearchDistanceEvaluation -from modelcache.processor.pre import query_multi_splicing -from modelcache.processor.pre import insert_multi_splicing from concurrent.futures import ThreadPoolExecutor -from modelcache.utils.model_filter import model_blacklist_filter -from modelcache.embedding import Data2VecAudio +from modelcache_mm import cache +from modelcache_mm.adapter import adapter +from modelcache_mm.manager import CacheBase, VectorBase, get_data_manager +from modelcache_mm.similarity_evaluation.distance import SearchDistanceEvaluation +# from modelcache.processor.pre import query_multi_splicing +# from modelcache.processor.pre import insert_multi_splicing +# from modelcache.utils.model_filter import model_blacklist_filter +# from modelcache.embedding import Data2VecAudio from modelcache_mm.processor.pre import mm_insert_dict from modelcache_mm.processor.pre import mm_query_dict from modelcache_mm.embedding import Clip2Vec @@ -60,10 +60,10 @@ def response_hitquery(cache_resp): @app.route('/welcome') def first_flask(): # 视图函数 - return 'hello, multicache!' + return 'hello, llms_cache!' 
-@app.route('/multicache', methods=['GET', 'POST']) +@app.route('/llms_cache', methods=['GET', 'POST']) def user_backend(): try: if request.method == 'POST': @@ -104,16 +104,7 @@ def user_backend(): "answer": ''} return json.dumps(result) - # model filter - # filter_resp = model_blacklist_filter(model, request_type) - # if isinstance(filter_resp, dict): - # return json.dumps(filter_resp) - if request_type == 'query': - # if UUID: - # try: - # uuid_list = UUID.split('==>') - # except Exception as e: try: start_time = time.time() response = adapter.ChatCompletion.create_query( @@ -135,16 +126,11 @@ def user_backend(): delta_time_log = round(time.time() - start_time, 3) future = executor.submit(save_query_info, result, model, query, delta_time_log) - # query_time = round(time.time() - start_time, 2) except Exception as e: raise e return json.dumps(result, ensure_ascii=False) if request_type == 'insert': - # if UUID: - # try: - # uuid_list = UUID.split('==>') - # except Exception as e: try: start_time = time.time() try: diff --git a/modelcache_mm/manager/scalar_data/sql_storage.py b/modelcache_mm/manager/scalar_data/sql_storage.py index c45f679..f80fb1b 100644 --- a/modelcache_mm/manager/scalar_data/sql_storage.py +++ b/modelcache_mm/manager/scalar_data/sql_storage.py @@ -16,7 +16,6 @@ def __init__( db_type: str = "mysql", config=None ): - self.host = config.get('mysql', 'host') self.port = int(config.get('mysql', 'port')) self.username = config.get('mysql', 'username') diff --git a/modelcache_mm/utils/index_util.py b/modelcache_mm/utils/index_util.py index b7d1a55..96665bf 100644 --- a/modelcache_mm/utils/index_util.py +++ b/modelcache_mm/utils/index_util.py @@ -18,7 +18,7 @@ def get_mm_index_name(model, mm_type): mm_type = 'image' elif mm_type == 'TEXT': mm_type = 'text' - return 'multicache' + '_' + model + '_' + mm_type + return 'llms_cache' + '_' + model + '_' + mm_type def get_mm_index_prefix(model, mm_type): From baa1dddc612b16d9737043eb706ba4b0983883b4 Mon Sep 17 00:00:00 2001 From: fuhui Date: Wed, 22 May 2024 11:18:30 +0800 Subject: [PATCH 41/98] Correct the prefix issue with index and remove redundant comments. --- examples/__init__.py | 11 ----------- examples/flask/llms_cache/__init__.py | 11 ----------- examples/flask/multi_cache/__init__.py | 11 ----------- modelcache_mm/utils/index_util.py | 4 ++-- 4 files changed, 2 insertions(+), 35 deletions(-) diff --git a/examples/__init__.py b/examples/__init__.py index 0705126..40a96af 100644 --- a/examples/__init__.py +++ b/examples/__init__.py @@ -1,12 +1 @@ # -*- coding: utf-8 -*- -""" - Alipay.com Inc. - Copyright (c) 2004-2023 All Rights Reserved. - ------------------------------------------------------ - File Name : __init__.py.py - Author : fuhui.phe - Create Time : 2024/5/22 11:03 - Description : description what the main function of this file - Change Activity: - version0 : 2024/5/22 11:03 by fuhui.phe init -""" diff --git a/examples/flask/llms_cache/__init__.py b/examples/flask/llms_cache/__init__.py index f433d63..40a96af 100644 --- a/examples/flask/llms_cache/__init__.py +++ b/examples/flask/llms_cache/__init__.py @@ -1,12 +1 @@ # -*- coding: utf-8 -*- -""" - Alipay.com Inc. - Copyright (c) 2004-2023 All Rights Reserved. 
- ------------------------------------------------------ - File Name : __init__.py.py - Author : fuhui.phe - Create Time : 2024/5/22 10:28 - Description : description what the main function of this file - Change Activity: - version0 : 2024/5/22 10:28 by fuhui.phe init -""" diff --git a/examples/flask/multi_cache/__init__.py b/examples/flask/multi_cache/__init__.py index b003444..40a96af 100644 --- a/examples/flask/multi_cache/__init__.py +++ b/examples/flask/multi_cache/__init__.py @@ -1,12 +1 @@ # -*- coding: utf-8 -*- -""" - Alipay.com Inc. - Copyright (c) 2004-2023 All Rights Reserved. - ------------------------------------------------------ - File Name : __init__.py.py - Author : fuhui.phe - Create Time : 2024/5/22 10:29 - Description : description what the main function of this file - Change Activity: - version0 : 2024/5/22 10:29 by fuhui.phe init -""" diff --git a/modelcache_mm/utils/index_util.py b/modelcache_mm/utils/index_util.py index 96665bf..efea863 100644 --- a/modelcache_mm/utils/index_util.py +++ b/modelcache_mm/utils/index_util.py @@ -2,7 +2,7 @@ def get_index_name(model): - return 'modelcache' + '_' + model + return 'multicache' + '_' + model def get_index_prefix(model): @@ -18,7 +18,7 @@ def get_mm_index_name(model, mm_type): mm_type = 'image' elif mm_type == 'TEXT': mm_type = 'text' - return 'llms_cache' + '_' + model + '_' + mm_type + return 'multi_cache' + '_' + model + '_' + mm_type def get_mm_index_prefix(model, mm_type): From f0f5d797b083e93d8e1e73694c458cc60c8a91f1 Mon Sep 17 00:00:00 2001 From: fuhui Date: Wed, 22 May 2024 11:25:42 +0800 Subject: [PATCH 42/98] fix issue: model params not used in code --- modelcache/embedding/data2vec.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modelcache/embedding/data2vec.py b/modelcache/embedding/data2vec.py index 274eb5a..529827b 100644 --- a/modelcache/embedding/data2vec.py +++ b/modelcache/embedding/data2vec.py @@ -14,11 +14,11 @@ def mean_pooling(model_output, attention_mask): class Data2VecAudio(BaseEmbedding): - def __init__(self, model: str = "sentence-transformers/all-MiniLM-L6-v2"): + def __init__(self, model: str = "model/text2vec-base-chinese/"): current_dir = os.path.dirname(os.path.abspath(__file__)) parent_dir = os.path.dirname(current_dir) model_dir = os.path.dirname(parent_dir) - model = os.path.join(model_dir, 'model/text2vec-base-chinese/') + model = os.path.join(model_dir, model) try: self.__dimension = self.model.config.hidden_size From 0d3843435c2ad64728f82ff6713d04489512d5c4 Mon Sep 17 00:00:00 2001 From: fuhui Date: Wed, 22 May 2024 11:41:34 +0800 Subject: [PATCH 43/98] Correct the format --- modelcache/embedding/data2vec.py | 2 +- modelcache_mm/adapter/adapter_query.py | 61 +++++++++++++------------- modelcache_mm/embedding/clip.py | 8 +--- modelcache_mm/manager/data_manager.py | 4 +- 4 files changed, 35 insertions(+), 40 deletions(-) diff --git a/modelcache/embedding/data2vec.py b/modelcache/embedding/data2vec.py index 529827b..3f450fb 100644 --- a/modelcache/embedding/data2vec.py +++ b/modelcache/embedding/data2vec.py @@ -8,7 +8,7 @@ def mean_pooling(model_output, attention_mask): - token_embeddings = model_output[0] # First element of model_output contains all token embeddings + token_embeddings = model_output[0] input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) diff --git a/modelcache_mm/adapter/adapter_query.py 
b/modelcache_mm/adapter/adapter_query.py index 776f076..dc01e62 100644 --- a/modelcache_mm/adapter/adapter_query.py +++ b/modelcache_mm/adapter/adapter_query.py @@ -47,37 +47,36 @@ def adapt_query(cache_data_convert, *args, **kwargs): else: raise MultiTypeError - embedding_data = None - mm_type = None - if cache_enable: - if pre_multi_type == 'IMG_TEXT': - embedding_data_resp = time_cal( - chat_cache.embedding_func, - func_name="iat_embedding", - report_func=chat_cache.report.embedding, - )(data_dict) - else: - embedding_data_resp = time_cal( - chat_cache.embedding_func, - func_name="iat_embedding", - report_func=chat_cache.report.embedding, - )(data_dict) - image_embeddings = embedding_data_resp['image_embedding'] - text_embeddings = embedding_data_resp['text_embeddings'] - - if len(image_embeddings) > 0 and len(image_embeddings) > 0: - embedding_data = np.concatenate((image_embeddings, text_embeddings)) - mm_type = 'mm' - elif len(image_embeddings) > 0: - image_embedding = np.array(image_embeddings[0]) - embedding_data = image_embedding - mm_type = 'image' - elif len(text_embeddings) > 0: - text_embedding = np.array(text_embeddings[0]) - embedding_data = text_embedding - mm_type = 'text' - else: - raise ValueError('maya embedding service return both empty list, please check!') + # embedding_data = None + # mm_type = None + if pre_multi_type == 'IMG_TEXT': + embedding_data_resp = time_cal( + chat_cache.embedding_func, + func_name="mm_embedding", + report_func=chat_cache.report.embedding, + )(data_dict) + else: + embedding_data_resp = time_cal( + chat_cache.embedding_func, + func_name="mm_embedding", + report_func=chat_cache.report.embedding, + )(data_dict) + image_embeddings = embedding_data_resp['image_embedding'] + text_embeddings = embedding_data_resp['text_embeddings'] + + if len(image_embeddings) > 0 and len(image_embeddings) > 0: + embedding_data = np.concatenate((image_embeddings, text_embeddings)) + # mm_type = 'mm' + elif len(image_embeddings) > 0: + image_embedding = np.array(image_embeddings[0]) + embedding_data = image_embedding + # mm_type = 'image' + elif len(text_embeddings) > 0: + text_embedding = np.array(text_embeddings[0]) + embedding_data = text_embedding + # mm_type = 'text' + else: + raise ValueError('maya embedding service return both empty list, please check!') if cache_enable: cache_data_list = time_cal( diff --git a/modelcache_mm/embedding/clip.py b/modelcache_mm/embedding/clip.py index d756f23..e2e2798 100644 --- a/modelcache_mm/embedding/clip.py +++ b/modelcache_mm/embedding/clip.py @@ -18,8 +18,8 @@ def to_embeddings(self, data_dict, **_): text_list = data_dict['text'] image_data = data_dict['image'] - img_data = None - txt_data = None + # img_data = None + # txt_data = None if image_data: input_img = load_image(image_data) @@ -46,8 +46,4 @@ def post_proc(self, token_embeddings, inputs): @property def dimension(self): - """Embedding dimension. 
- - :return: embedding dimension - """ return self.__dimension diff --git a/modelcache_mm/manager/data_manager.py b/modelcache_mm/manager/data_manager.py index 875f1a5..338ef15 100644 --- a/modelcache_mm/manager/data_manager.py +++ b/modelcache_mm/manager/data_manager.py @@ -34,7 +34,7 @@ def save_query_resp(self, query_resp_dict, **kwargs): @abstractmethod def import_data( self, texts: List[Any], image_urls: List[Any], image_ids: List[Any], answers: List[Answer], - embeddings: List[Any], model: Any, iat_type: Any + embeddings: List[Any], model: Any, mm_type: Any ): pass @@ -96,7 +96,7 @@ def save_query_resp(self, query_resp_dict, **kwargs): def import_data( self, texts: List[Any], image_urls: List[Any], image_ids: List[Any], answers: List[Answer], - embeddings: List[Any], model: Any, iat_type: Any + embeddings: List[Any], model: Any, mm_type: Any ): pass From 4dd50fe151eb1ee96d4d8dc354b48ebccae26430 Mon Sep 17 00:00:00 2001 From: powerli2002 <845160705@qq.com> Date: Wed, 22 May 2024 17:24:27 +0800 Subject: [PATCH 44/98] fix: Fixed an issue where clearing faiss database was invalid when using flask4modelcache_demo --- modelcache/manager/vector_data/faiss.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/modelcache/manager/vector_data/faiss.py b/modelcache/manager/vector_data/faiss.py index f035ded..0f8445c 100644 --- a/modelcache/manager/vector_data/faiss.py +++ b/modelcache/manager/vector_data/faiss.py @@ -34,7 +34,10 @@ def search(self, data: np.ndarray, top_k: int = -1, model=None): return list(zip(dist[0], ids)) def rebuild_col(self, ids=None): - return True + try: + self._index.reset() + except Exception as e: + return f"An error occurred during index rebuild: {e}" def rebuild(self, ids=None): return True From 5b8931ac6b1717177b1081dd2bcb67570e4c3765 Mon Sep 17 00:00:00 2001 From: fuhui Date: Thu, 23 May 2024 16:50:42 +0800 Subject: [PATCH 45/98] add todo list in readme_cn.md --- README_CN.md | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/README_CN.md b/README_CN.md index 0a87c67..327bd40 100644 --- a/README_CN.md +++ b/README_CN.md @@ -250,11 +250,23 @@ https://mp.weixin.qq.com/s/ExIRu2o7yvXa6nNLZcCfhQ - 异步日志回写能力,用于数据分析和统计 - 增加model字段和数据统计字段,用于功能拓展。 -未来会持续建设的功能: +## Todo List +### Adapter +- [ ] register adapter for Milvus:根据scope中的model参数,初始化对应Collection 并且执行load操作。 +Embedding model&inference +- [ ] inference优化:优化embedding推理速度,适配fastertransformer, TurboTransformers, ByteTransformer等推理引擎 +- [ ] 兼容huggingface模型和modelscope模型,提供更多模型加载方式。 +### Scalar Storage +- [ ] Support MongoDB +- [ ] Support ElasticSearch +### Vector Storage +- [ ] 在多模态场景中适配faiss存储 +### Rank能力 +- [ ] 增加Rank模型,对embedding召回后的数据,进行精排 +### Service +- [ ] 支持fastapi +- [ ] 增加前端界面,用于测试 -- [ ] 基于超参数的数据隔离 -- [ ] system promt分区存储能力,以提高相似度匹配的准确度和效率 -- [ ] 更通用的embedding模型和相似度评估算法 ## 致谢 本项目参考了以下开源项目,在此对相关项目和研究开发人员表示感谢。
    [GPTCache](https://github.com/zilliztech/GPTCache) From 85b777ff9a0eca5eb68d33a96cf65ae26914397b Mon Sep 17 00:00:00 2001 From: fuhui Date: Thu, 23 May 2024 17:08:31 +0800 Subject: [PATCH 46/98] add todo list in readme.md --- README.md | 20 ++++++++++++++++---- README_CN.md | 18 +++++++++--------- 2 files changed, 25 insertions(+), 13 deletions(-) diff --git a/README.md b/README.md index 934c3ee..fd5331e 100644 --- a/README.md +++ b/README.md @@ -251,11 +251,23 @@ In ModelCache, we adopted the main idea of GPTCache, includes core modules: ada - Asynchronous log write-back capability for data analysis and statistics. - Added model field and data statistics field for feature expansion. -Future Features Under Development: +## Todo List +### Adapter +- [ ] Register adapter for Milvus:Based on the "model" parameter in the scope, initialize the corresponding Collection and perform the load operation. +### Embedding model&inference +- [ ] Inference Optimization: Optimizing the speed of embedding inference, compatible with inference engines such as FasterTransformer, TurboTransformers, and ByteTransformer. +- [ ] Compatibility with Hugging Face models and ModelScope models, offering more methods for model loading. +### Scalar Storage +- [ ] Support MongoDB +- [ ] Support ElasticSearch +### Vector Storage +- [ ] Adapts Faiss storage in multimodal scenarios. +### Rank能力 +- [ ] Add ranking model to refine the order of data after embedding recall. +### Service +- [ ] Supports FastAPI. +- [ ] Add visual interface to offer a more direct user experience. -- [ ] Data isolation based on hyperparameters. -- [ ] System prompt partitioning storage capability to enhance accuracy and efficiency of similarity matching. -- [ ] More versatile embedding models and similarity evaluation algorithms. ## Acknowledgements This project has referenced the following open-source projects. We would like to express our gratitude to the projects and their developers for their contributions and research.
    [GPTCache](https://github.com/zilliztech/GPTCache) diff --git a/README_CN.md b/README_CN.md index 327bd40..4eba507 100644 --- a/README_CN.md +++ b/README_CN.md @@ -253,19 +253,19 @@ https://mp.weixin.qq.com/s/ExIRu2o7yvXa6nNLZcCfhQ ## Todo List ### Adapter - [ ] register adapter for Milvus:根据scope中的model参数,初始化对应Collection 并且执行load操作。 -Embedding model&inference -- [ ] inference优化:优化embedding推理速度,适配fastertransformer, TurboTransformers, ByteTransformer等推理引擎 +### Embedding model&inference +- [ ] inference优化:优化embedding推理速度,适配fastertransformer, TurboTransformers, ByteTransformer等推理引擎。 - [ ] 兼容huggingface模型和modelscope模型,提供更多模型加载方式。 ### Scalar Storage -- [ ] Support MongoDB -- [ ] Support ElasticSearch +- [ ] Support MongoDB。 +- [ ] Support ElasticSearch。 ### Vector Storage -- [ ] 在多模态场景中适配faiss存储 -### Rank能力 -- [ ] 增加Rank模型,对embedding召回后的数据,进行精排 +- [ ] 在多模态场景中适配faiss存储。 +### Ranking +- [ ] 增加Rank模型,对embedding召回后的数据,进行精排。 ### Service -- [ ] 支持fastapi -- [ ] 增加前端界面,用于测试 +- [ ] 支持fastapi。 +- [ ] 增加前端界面,用于测试。 ## 致谢 本项目参考了以下开源项目,在此对相关项目和研究开发人员表示感谢。
    [GPTCache](https://github.com/zilliztech/GPTCache) From 70d2ab1bc918c0315dc270bde39ccfea739c888f Mon Sep 17 00:00:00 2001 From: fuhui Date: Thu, 23 May 2024 19:56:02 +0800 Subject: [PATCH 47/98] sqlite does not correctly return the number of rows deleted, made some adjustments and fixes --- modelcache/embedding/__init__.py | 2 +- .../manager/scalar_data/sql_storage_sqlite.py | 40 ++++--------------- 2 files changed, 9 insertions(+), 33 deletions(-) diff --git a/modelcache/embedding/__init__.py b/modelcache/embedding/__init__.py index 03b6762..eb6ca80 100644 --- a/modelcache/embedding/__init__.py +++ b/modelcache/embedding/__init__.py @@ -12,7 +12,7 @@ def Huggingface(model="sentence-transformers/all-mpnet-base-v2"): return huggingface.Huggingface(model) -def Data2VecAudio(model="facebook/data2vec-audio-base-960h"): +def Data2VecAudio(model="model/text2vec-base-chinese/"): return data2vec.Data2VecAudio(model) diff --git a/modelcache/manager/scalar_data/sql_storage_sqlite.py b/modelcache/manager/scalar_data/sql_storage_sqlite.py index d4febdc..c0c7546 100644 --- a/modelcache/manager/scalar_data/sql_storage_sqlite.py +++ b/modelcache/manager/scalar_data/sql_storage_sqlite.py @@ -17,19 +17,6 @@ def __init__( self.create() def create(self): - # answer_table_sql = """CREATE TABLE IF NOT EXISTS `modelcache_llm_answer` ( - # `id` bigint(20) NOT NULL AUTO_INCREMENT comment '主键', - # `gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP comment '创建时间', - # `gmt_modified` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP comment '修改时间', - # `question` text NOT NULL comment 'question', - # `answer` text NOT NULL comment 'answer', - # `answer_type` int(11) NOT NULL comment 'answer_type', - # `hit_count` int(11) NOT NULL DEFAULT '0' comment 'hit_count', - # `model` varchar(1000) NOT NULL comment 'model', - # `embedding_data` blob NOT NULL comment 'embedding_data', - # PRIMARY KEY(`id`) - # ) AUTO_INCREMENT = 1 DEFAULT CHARSET = utf8mb4 COMMENT = 'modelcache_llm_answer'; - # """ answer_table_sql = """CREATE TABLE IF NOT EXISTS modelcache_llm_answer ( id INTEGER PRIMARY KEY AUTOINCREMENT, gmt_create TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, @@ -43,21 +30,6 @@ def create(self): ); """ - # log_table_sql = """CREATE TABLE IF NOT EXISTS `modelcache_query_log` ( - # `id` bigint(20) NOT NULL AUTO_INCREMENT comment '主键', - # `gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP comment '创建时间', - # `gmt_modified` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP comment '修改时间', - # `error_code` int(11) NOT NULL comment 'errorCode', - # `error_desc` varchar(1000) NOT NULL comment 'errorDesc', - # `cache_hit` varchar(100) NOT NULL comment 'cacheHit', - # `delta_time` float NOT NULL comment 'delta_time', - # `model` varchar(1000) NOT NULL comment 'model', - # `query` text NOT NULL comment 'query', - # `hit_query` text NOT NULL comment 'hitQuery', - # `answer` text NOT NULL comment 'answer', - # PRIMARY KEY(`id`) - # ) AUTO_INCREMENT = 1 DEFAULT CHARSET = utf8mb4 COMMENT = 'modelcache_query_log'; - # """ log_table_sql = """CREATE TABLE IF NOT EXISTS modelcache_query_log ( id INTEGER PRIMARY KEY AUTOINCREMENT, gmt_create TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, @@ -195,17 +167,21 @@ def mark_deleted(self, keys): def model_deleted(self, model_name): table_name = "modelcache_llm_answer" - delete_sql = "Delete from {} WHERE model='{}'".format(table_name, model_name) + delete_sql = "Delete from {} WHERE model=?".format(table_name) conn = 
sqlite3.connect(self._url) try: cursor = conn.cursor() - resp = cursor.execute(delete_sql) + cursor.execute(delete_sql, (model_name,)) conn.commit() + # get delete rows + deleted_rows_count = cursor.rowcount cursor.close() - conn.close() + except sqlite3.Error as e: + print(f"SQLite error: {e}") + deleted_rows_count = 0 # if except, return 0 finally: conn.close() - return resp + return deleted_rows_count def clear_deleted_data(self): pass From a7472d37b08a80f56b1956630a6a5eb7b8b61f5d Mon Sep 17 00:00:00 2001 From: liwenshi Date: Fri, 24 May 2024 12:33:34 +0800 Subject: [PATCH 48/98] feat: support huggingface/text-embeddings-inference for faster embedding inference --- modelcache/embedding/__init__.py | 4 +++ .../embedding/text_embeddings_inference.py | 31 +++++++++++++++++++ 2 files changed, 35 insertions(+) create mode 100644 modelcache/embedding/text_embeddings_inference.py diff --git a/modelcache/embedding/__init__.py b/modelcache/embedding/__init__.py index eb6ca80..410c92a 100644 --- a/modelcache/embedding/__init__.py +++ b/modelcache/embedding/__init__.py @@ -6,6 +6,7 @@ fasttext = LazyImport("fasttext", globals(), "modelcache.embedding.fasttext") paddlenlp = LazyImport("paddlenlp", globals(), "modelcache.embedding.paddlenlp") timm = LazyImport("timm", globals(), "modelcache.embedding.timm") +text_embeddings_inference = LazyImport("text_embeddings_inference", globals(), "modelcache.embedding.text_embeddings_inference") def Huggingface(model="sentence-transformers/all-mpnet-base-v2"): @@ -30,3 +31,6 @@ def PaddleNLP(model="ernie-3.0-medium-zh"): def Timm(model="resnet50", device="default"): return timm.Timm(model, device) + +def TextEmbeddingsInference(base_url, model): + return text_embeddings_inference.TextEmbeddingsInference(base_url, model) \ No newline at end of file diff --git a/modelcache/embedding/text_embeddings_inference.py b/modelcache/embedding/text_embeddings_inference.py new file mode 100644 index 0000000..87b34aa --- /dev/null +++ b/modelcache/embedding/text_embeddings_inference.py @@ -0,0 +1,31 @@ +# -*- coding: utf-8 -*- +import requests +import numpy as np +from modelcache.embedding.base import BaseEmbedding + +class TextEmbeddingsInference(BaseEmbedding): + def __init__(self, base_url: str, model: str): + self.base_url = base_url + self.model = model + self.headers = { + 'accept': 'application/json', + 'Content-Type': 'application/json', + } + self.__dimension = self.to_embeddings('test').shape[0] + def to_embeddings(self, data, **_): + json_data = { + 'input': data, + 'model': self.model, + } + + response = requests.post(self.base_url, headers=self.headers, json=json_data) + embedding = response.json()['data'][0]['embedding'] + return np.array(embedding) + + @property + def dimension(self): + """Embedding dimension. 
+ + :return: embedding dimension + """ + return self.__dimension From 7afe7b172506c88c7995f51a8a7678b16864129d Mon Sep 17 00:00:00 2001 From: powerli2002 <845160705@qq.com> Date: Fri, 24 May 2024 15:56:07 +0800 Subject: [PATCH 49/98] Fix problems with deleting logs --- modelcache/manager/scalar_data/sql_storage.py | 7 +++++++ modelcache/manager/scalar_data/sql_storage_sqlite.py | 5 +++++ 2 files changed, 12 insertions(+) diff --git a/modelcache/manager/scalar_data/sql_storage.py b/modelcache/manager/scalar_data/sql_storage.py index 503217a..ab83601 100644 --- a/modelcache/manager/scalar_data/sql_storage.py +++ b/modelcache/manager/scalar_data/sql_storage.py @@ -149,6 +149,10 @@ def mark_deleted(self, keys): def model_deleted(self, model_name): table_name = "cache_codegpt_answer" delete_sql = "Delete from {} WHERE model='{}'".format(table_name, model_name) + + table_log_name = "modelcache_query_log" + delete_log_sql = "Delete from {} WHERE model='{}'".format(table_log_name, model_name) + conn = self.pool.connection() # 使用连接执行删除数据操作 try: @@ -156,6 +160,9 @@ def model_deleted(self, model_name): # 执行删除数据操作 resp = cursor.execute(delete_sql) conn.commit() + # 执行删除该模型对应日志操作 resp_log行数不返回 + resp_log = cursor.execute(delete_log_sql) + conn.commit() # 分别提交事务 finally: # 关闭连接,将连接返回给连接池 conn.close() diff --git a/modelcache/manager/scalar_data/sql_storage_sqlite.py b/modelcache/manager/scalar_data/sql_storage_sqlite.py index 495fbf7..e8281ae 100644 --- a/modelcache/manager/scalar_data/sql_storage_sqlite.py +++ b/modelcache/manager/scalar_data/sql_storage_sqlite.py @@ -260,11 +260,16 @@ def mark_deleted(self, keys): def model_deleted(self, model_name): table_name = "modelcache_llm_answer" delete_sql = "Delete from {} WHERE model='{}'".format(table_name, model_name) + + table_log_name = "modelcache_query_log" + delete_log_sql = "Delete from {} WHERE model='{}'".format(table_log_name, model_name) conn = sqlite3.connect(self._url) try: cursor = conn.cursor() resp = cursor.execute(delete_sql) conn.commit() + resp = cursor.execute(delete_log_sql) + conn.commit() cursor.close() conn.close() finally: From 02706bef0dbb1a5e85279d00de10ba6594550b9f Mon Sep 17 00:00:00 2001 From: powerli2002 <845160705@qq.com> Date: Fri, 24 May 2024 16:15:20 +0800 Subject: [PATCH 50/98] Fix problems with deleting logs --- modelcache/manager/scalar_data/sql_storage_sqlite.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/modelcache/manager/scalar_data/sql_storage_sqlite.py b/modelcache/manager/scalar_data/sql_storage_sqlite.py index c0c7546..3c68f9e 100644 --- a/modelcache/manager/scalar_data/sql_storage_sqlite.py +++ b/modelcache/manager/scalar_data/sql_storage_sqlite.py @@ -168,6 +168,9 @@ def mark_deleted(self, keys): def model_deleted(self, model_name): table_name = "modelcache_llm_answer" delete_sql = "Delete from {} WHERE model=?".format(table_name) + + table_log_name = "modelcache_query_log" + delete_log_sql = "Delete from {} WHERE model=?".format(table_log_name) conn = sqlite3.connect(self._url) try: cursor = conn.cursor() @@ -175,6 +178,9 @@ def model_deleted(self, model_name): conn.commit() # get delete rows deleted_rows_count = cursor.rowcount + + cursor.execute(delete_log_sql, (model_name,)) + conn.commit() cursor.close() except sqlite3.Error as e: print(f"SQLite error: {e}") From 8124dc11cd115be0c936f88f7e378dfbd9bd5abd Mon Sep 17 00:00:00 2001 From: fuhui Date: Sat, 25 May 2024 07:27:19 +0800 Subject: [PATCH 51/98] Add the examples/embedding directory for adding test cases --- examples/embedding/__init__.py | 
1 + 1 file changed, 1 insertion(+) create mode 100644 examples/embedding/__init__.py diff --git a/examples/embedding/__init__.py b/examples/embedding/__init__.py new file mode 100644 index 0000000..40a96af --- /dev/null +++ b/examples/embedding/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- From 01acdcdf0948717ac8d9de4c5be3f515b1148686 Mon Sep 17 00:00:00 2001 From: liwenshipro Date: Sat, 25 May 2024 09:37:27 +0800 Subject: [PATCH 52/98] fix: rename huggingface TEI class --- modelcache/embedding/__init__.py | 4 ++-- .../{text_embeddings_inference.py => huggingface_tei.py} | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) rename modelcache/embedding/{text_embeddings_inference.py => huggingface_tei.py} (95%) diff --git a/modelcache/embedding/__init__.py b/modelcache/embedding/__init__.py index 410c92a..b61372c 100644 --- a/modelcache/embedding/__init__.py +++ b/modelcache/embedding/__init__.py @@ -6,7 +6,7 @@ fasttext = LazyImport("fasttext", globals(), "modelcache.embedding.fasttext") paddlenlp = LazyImport("paddlenlp", globals(), "modelcache.embedding.paddlenlp") timm = LazyImport("timm", globals(), "modelcache.embedding.timm") -text_embeddings_inference = LazyImport("text_embeddings_inference", globals(), "modelcache.embedding.text_embeddings_inference") +huggingface_tei = LazyImport("huggingface_tei", globals(), "modelcache.embedding.huggingface_tei") def Huggingface(model="sentence-transformers/all-mpnet-base-v2"): @@ -33,4 +33,4 @@ def Timm(model="resnet50", device="default"): return timm.Timm(model, device) def TextEmbeddingsInference(base_url, model): - return text_embeddings_inference.TextEmbeddingsInference(base_url, model) \ No newline at end of file + return huggingface_tei.HuggingfaceTEI(base_url, model) \ No newline at end of file diff --git a/modelcache/embedding/text_embeddings_inference.py b/modelcache/embedding/huggingface_tei.py similarity index 95% rename from modelcache/embedding/text_embeddings_inference.py rename to modelcache/embedding/huggingface_tei.py index 87b34aa..94075fe 100644 --- a/modelcache/embedding/text_embeddings_inference.py +++ b/modelcache/embedding/huggingface_tei.py @@ -3,7 +3,7 @@ import numpy as np from modelcache.embedding.base import BaseEmbedding -class TextEmbeddingsInference(BaseEmbedding): +class HuggingfaceTEI(BaseEmbedding): def __init__(self, base_url: str, model: str): self.base_url = base_url self.model = model @@ -12,6 +12,7 @@ def __init__(self, base_url: str, model: str): 'Content-Type': 'application/json', } self.__dimension = self.to_embeddings('test').shape[0] + def to_embeddings(self, data, **_): json_data = { 'input': data, From 18d70dfee2a4f33e760a78581a57d0688cad0073 Mon Sep 17 00:00:00 2001 From: liwenshipro Date: Sat, 25 May 2024 10:28:12 +0800 Subject: [PATCH 53/98] add huggingface tei example --- examples/embedding/huggingface_tei_example.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) create mode 100644 examples/embedding/huggingface_tei_example.py diff --git a/examples/embedding/huggingface_tei_example.py b/examples/embedding/huggingface_tei_example.py new file mode 100644 index 0000000..152834f --- /dev/null +++ b/examples/embedding/huggingface_tei_example.py @@ -0,0 +1,17 @@ +# -*- coding: utf-8 -*- +import sys +sys.path.append(".") +from modelcache.embedding.huggingface_tei import HuggingfaceTEI + +''' +run tei server: +text-embeddings-router --model-id BAAI/bge-large-zh-v1.5 --port 8080 +''' + +def run(): + tei_instance = HuggingfaceTEI('http://127.0.0.1:8080/v1/embeddings', 
'BAAI/bge-large-zh-v1.5') + print('dimenson', tei_instance.dimension) + print('embedding', tei_instance.to_embeddings('hello')) + +if __name__ == '__main__': + run() \ No newline at end of file From 7605a824251a4845a2f892891a8cffcea71c7db5 Mon Sep 17 00:00:00 2001 From: liwenshipro Date: Sat, 25 May 2024 10:50:23 +0800 Subject: [PATCH 54/98] fix: rename huggingface tei --- modelcache/embedding/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modelcache/embedding/__init__.py b/modelcache/embedding/__init__.py index b61372c..5684a2d 100644 --- a/modelcache/embedding/__init__.py +++ b/modelcache/embedding/__init__.py @@ -32,5 +32,5 @@ def PaddleNLP(model="ernie-3.0-medium-zh"): def Timm(model="resnet50", device="default"): return timm.Timm(model, device) -def TextEmbeddingsInference(base_url, model): +def HuggingfaceTEI(base_url, model): return huggingface_tei.HuggingfaceTEI(base_url, model) \ No newline at end of file From 5277b72529cb89d89ce38d647589ff923284a592 Mon Sep 17 00:00:00 2001 From: powerli2002 <845160705@qq.com> Date: Mon, 24 Jun 2024 17:01:28 +0800 Subject: [PATCH 55/98] 20240624_fix_cache_eviction --- modelcache/manager/eviction_manager.py | 4 +- modelcache/manager/scalar_data/sql_storage.py | 54 ++++++++++++++++--- reference_doc/create_table.sql | 5 +- 3 files changed, 51 insertions(+), 12 deletions(-) diff --git a/modelcache/manager/eviction_manager.py b/modelcache/manager/eviction_manager.py index 0a6a406..d44996c 100644 --- a/modelcache/manager/eviction_manager.py +++ b/modelcache/manager/eviction_manager.py @@ -29,10 +29,10 @@ def check_evict(self): return True return False - def delete(self): + def delete(self,model): mark_ids = self._scalar_storage.get_ids(deleted=True) self._scalar_storage.clear_deleted_data() - self._vector_base.delete(mark_ids) + self._vector_base.delete(mark_ids,model) self.delete_count += 1 if self.delete_count >= self.REBUILD_CONDITION: self.rebuild() diff --git a/modelcache/manager/scalar_data/sql_storage.py b/modelcache/manager/scalar_data/sql_storage.py index ab83601..eb776c1 100644 --- a/modelcache/manager/scalar_data/sql_storage.py +++ b/modelcache/manager/scalar_data/sql_storage.py @@ -41,15 +41,15 @@ def _insert(self, data: List): model = data[3] answer_type = 0 embedding_data = embedding_data.tobytes() + is_deleted = 0 table_name = "cache_codegpt_answer" - insert_sql = "INSERT INTO {} (question, answer, answer_type, model, embedding_data) VALUES (%s, %s, %s, %s, _binary%s)".format(table_name) - + insert_sql = "INSERT INTO {} (question, answer, answer_type, model, embedding_data, is_deleted) VALUES (%s, %s, %s, %s, _binary%s, %s)".format(table_name) conn = self.pool.connection() try: with conn.cursor() as cursor: # 执行插入数据操作 - values = (question, answer, answer_type, model, embedding_data) + values = (question, answer, answer_type, model, embedding_data, is_deleted) cursor.execute(insert_sql, values) conn.commit() id = cursor.lastrowid @@ -127,18 +127,30 @@ def update_hit_count_by_id(self, primary_id: int): conn.close() def get_ids(self, deleted=True): - pass + table_name = "cache_codegpt_answer" + state = 1 if deleted else 0 + query_sql = "Select id FROM {} WHERE is_deleted = {}".format(table_name, state) + + conn = self.pool.connection() + try: + with conn.cursor() as cursor: + cursor.execute(query_sql) + ids = [row[0] for row in cursor.fetchall()] + finally: + conn.close() + + return ids def mark_deleted(self, keys): table_name = "cache_codegpt_answer" - delete_sql = "Delete from {} WHERE id in 
({})".format(table_name, ",".join([str(i) for i in keys])) + mark_sql = " update {} set is_deleted=1 WHERE id in ({})".format(table_name, ",".join([str(i) for i in keys])) # 从连接池中获取连接 conn = self.pool.connection() try: with conn.cursor() as cursor: # 执行删除数据操作 - cursor.execute(delete_sql) + cursor.execute(mark_sql) delete_count = cursor.rowcount conn.commit() finally: @@ -169,10 +181,36 @@ def model_deleted(self, model_name): return resp def clear_deleted_data(self): - pass + table_name = "cache_codegpt_answer" + delete_sql = "DELETE FROM {} WHERE is_deleted = 1".format(table_name) + + conn = self.pool.connection() + try: + with conn.cursor() as cursor: + cursor.execute(delete_sql) + delete_count = cursor.rowcount + conn.commit() + finally: + conn.close() + + return delete_count def count(self, state: int = 0, is_all: bool = False): - pass + table_name = "cache_codegpt_answer" + if is_all: + count_sql = "SELECT COUNT(*) FROM {}".format(table_name) + else: + count_sql = "SELECT COUNT(*) FROM {} WHERE is_deleted = {}".format(table_name,state) + + conn = self.pool.connection() + try: + with conn.cursor() as cursor: + cursor.execute(count_sql) + num = cursor.fetchone()[0] + finally: + conn.close() + + return num def close(self): pass diff --git a/reference_doc/create_table.sql b/reference_doc/create_table.sql index 7c00be9..27c0beb 100644 --- a/reference_doc/create_table.sql +++ b/reference_doc/create_table.sql @@ -1,4 +1,4 @@ -CREATE TABLE `modelcache_llm_answer` ( +CREATE TABLE `cache_codegpt_answer` ( `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT comment '主键', `gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP comment '创建时间', `gmt_modified` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP comment '修改时间', @@ -8,8 +8,9 @@ CREATE TABLE `modelcache_llm_answer` ( `hit_count` int(11) NOT NULL DEFAULT '0' comment 'hit_count', `model` varchar(1000) NOT NULL comment 'model', `embedding_data` blob NOT NULL comment 'embedding_data', + `is_deleted` tinyint(1) NOT NULL DEFAULT '0' COMMENT 'delete state(0 Not deleted,-1 deleted)', PRIMARY KEY(`id`) -) AUTO_INCREMENT = 1 DEFAULT CHARSET = utf8mb4 COMMENT = 'modelcache_llm_answer'; +) AUTO_INCREMENT = 1 DEFAULT CHARSET = utf8mb4 COMMENT = 'cache_codegpt_answer'; CREATE TABLE `modelcache_query_log` ( From 021a88bf2aecc6c414274661f78d1a3f04a87199 Mon Sep 17 00:00:00 2001 From: fuhui Date: Fri, 13 Sep 2024 11:21:52 +0800 Subject: [PATCH 56/98] update readme --- README.md | 4 +- README_CN.md | 2 +- .../manager/scalar_data/sql_storage_sqlite.py | 89 ++++++++----------- modelcache_mm/manager/vector_data/faiss.py | 75 ++++++++++++++++ modelcache_mm/manager/vector_data/manager.py | 33 ++----- 5 files changed, 122 insertions(+), 81 deletions(-) create mode 100644 modelcache_mm/manager/vector_data/faiss.py diff --git a/README.md b/README.md index fd5331e..6ebc5f7 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@

    @@ -262,7 +262,7 @@ In ModelCache, we adopted the main idea of GPTCache, includes core modules: ada - [ ] Support ElasticSearch ### Vector Storage - [ ] Adapts Faiss storage in multimodal scenarios. -### Rank能力 +### Ranking - [ ] Add ranking model to refine the order of data after embedding recall. ### Service - [ ] Supports FastAPI. diff --git a/README_CN.md b/README_CN.md index 4eba507..646c803 100644 --- a/README_CN.md +++ b/README_CN.md @@ -1,6 +1,6 @@

    -Codefuse-ModelCache +ModelCache

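A note on the eviction changes a few patches back: PATCH 55 turns row removal into a two-step soft delete (mark_deleted only sets is_deleted=1, clear_deleted_data later purges the marked rows, and the same ids are then dropped from the vector index by EvictionManager.delete), while PATCH 47 reads the number of purged rows from cursor.rowcount because sqlite3's execute() returns the cursor itself rather than a deletion count. The following is a minimal, self-contained sketch of that flow against an in-memory SQLite table; the table name, column set, and the final vector-store call are illustrative stand-ins, not the project's actual classes.

import sqlite3

# Illustrative schema only: a cut-down answer table with the is_deleted flag added in PATCH 55.
conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE demo_answer ("
    "id INTEGER PRIMARY KEY AUTOINCREMENT, model TEXT, is_deleted INTEGER NOT NULL DEFAULT 0)"
)
conn.executemany("INSERT INTO demo_answer (model) VALUES (?)", [("demo_model",)] * 3)
conn.commit()

def mark_deleted(ids):
    # Step 1: soft delete -- only flip the flag, mirroring mark_deleted() in the patch.
    placeholders = ",".join("?" * len(ids))
    cur = conn.cursor()
    cur.execute("UPDATE demo_answer SET is_deleted=1 WHERE id IN ({})".format(placeholders), ids)
    conn.commit()

def get_ids(deleted=True):
    # Collect the soft-deleted ids, as EvictionManager.delete() does before purging.
    state = 1 if deleted else 0
    cur = conn.execute("SELECT id FROM demo_answer WHERE is_deleted=?", (state,))
    return [row[0] for row in cur.fetchall()]

def clear_deleted_data():
    # Step 2: hard delete. The count comes from cursor.rowcount, not from execute()'s return value.
    cur = conn.cursor()
    cur.execute("DELETE FROM demo_answer WHERE is_deleted=1")
    conn.commit()
    return cur.rowcount

mark_deleted([1, 2])
marked_ids = get_ids(deleted=True)   # [1, 2]
purged = clear_deleted_data()        # 2
print(marked_ids, purged)
# A real run would finish by removing the same ids from the vector index,
# e.g. vector_base.delete(marked_ids, model) as the patched EvictionManager does.

Reading the count from cursor.rowcount is also what lets the sqlite backend's model_deleted() report how many rows it actually removed, which the pre-PATCH 47 code could not do.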
    diff --git a/modelcache_mm/manager/scalar_data/sql_storage_sqlite.py b/modelcache_mm/manager/scalar_data/sql_storage_sqlite.py index 5d4d422..9d16c00 100644 --- a/modelcache_mm/manager/scalar_data/sql_storage_sqlite.py +++ b/modelcache_mm/manager/scalar_data/sql_storage_sqlite.py @@ -16,47 +16,34 @@ def __init__( self.create() def create(self): - # answer_table_sql = """CREATE TABLE IF NOT EXISTS `modelcache_llm_answer` ( - # `id` bigint(20) NOT NULL AUTO_INCREMENT comment '主键', - # `gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP comment '创建时间', - # `gmt_modified` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP comment '修改时间', - # `question` text NOT NULL comment 'question', - # `answer` text NOT NULL comment 'answer', - # `answer_type` int(11) NOT NULL comment 'answer_type', - # `hit_count` int(11) NOT NULL DEFAULT '0' comment 'hit_count', - # `model` varchar(1000) NOT NULL comment 'model', - # `embedding_data` blob NOT NULL comment 'embedding_data', - # PRIMARY KEY(`id`) - # ) AUTO_INCREMENT = 1 DEFAULT CHARSET = utf8mb4 COMMENT = 'modelcache_llm_answer'; - # """ - answer_table_sql = """CREATE TABLE IF NOT EXISTS modelcache_llm_answer ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - gmt_create TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, - gmt_modified TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, - question TEXT NOT NULL, - answer TEXT NOT NULL, - answer_type INTEGER NOT NULL, - hit_count INTEGER NOT NULL DEFAULT 0, - model VARCHAR(1000) NOT NULL, - embedding_data BLOB NOT NULL - ); - """ + # answer_table_sql = """CREATE TABLE IF NOT EXISTS modelcache_llm_answer ( + # id INTEGER PRIMARY KEY AUTOINCREMENT, + # gmt_create TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, + # gmt_modified TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, + # question TEXT NOT NULL, + # answer TEXT NOT NULL, + # answer_type INTEGER NOT NULL, + # hit_count INTEGER NOT NULL DEFAULT 0, + # model VARCHAR(1000) NOT NULL, + # embedding_data BLOB NOT NULL + # ); + # """ + + answer_table_sql = """CREATE TABLE IF NOT EXISTS `open_cache_mm_answer` ( + `id` INTEGER PRIMARY KEY AUTOINCREMENT, + `gmt_create` TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, + `gmt_modified` TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, + `question_text` TEXT NOT NULL, + `image_url` VARCHAR(2048) NOT NULL, + `answer` TEXT NOT NULL, + `answer_type` INTEGER NOT NULL, + `hit_count` INTEGER NOT NULL DEFAULT 0, + `model` VARCHAR(1000) NOT NULL, + `image_raw` BLOB DEFAULT NULL, + `image_id` VARCHAR(1000) DEFAULT NULL +); + """ - # log_table_sql = """CREATE TABLE IF NOT EXISTS `modelcache_query_log` ( - # `id` bigint(20) NOT NULL AUTO_INCREMENT comment '主键', - # `gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP comment '创建时间', - # `gmt_modified` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP comment '修改时间', - # `error_code` int(11) NOT NULL comment 'errorCode', - # `error_desc` varchar(1000) NOT NULL comment 'errorDesc', - # `cache_hit` varchar(100) NOT NULL comment 'cacheHit', - # `delta_time` float NOT NULL comment 'delta_time', - # `model` varchar(1000) NOT NULL comment 'model', - # `query` text NOT NULL comment 'query', - # `hit_query` text NOT NULL comment 'hitQuery', - # `answer` text NOT NULL comment 'answer', - # PRIMARY KEY(`id`) - # ) AUTO_INCREMENT = 1 DEFAULT CHARSET = utf8mb4 COMMENT = 'modelcache_query_log'; - # """ log_table_sql = """CREATE TABLE IF NOT EXISTS modelcache_query_log ( id INTEGER PRIMARY KEY AUTOINCREMENT, gmt_create TIMESTAMP NOT NULL DEFAULT 
CURRENT_TIMESTAMP, @@ -85,19 +72,19 @@ def create(self): def _insert(self, data: List): answer = data[0] - question = data[1] - embedding_data = data[2] - model = data[3] + text = data[1] + image_url = data[2] + image_id = data[3] + model = data[4] answer_type = 0 - embedding_data = embedding_data.tobytes() - table_name = "modelcache_llm_answer" - insert_sql = "INSERT INTO {} (question, answer, answer_type, model, embedding_data) VALUES (?, ?, ?, ?, ?)".format(table_name) + table_name = "open_cache_mm_answer" + insert_sql = "INSERT INTO {} (question_text, image_url, image_id, answer, answer_type, model) VALUES (?, ?, ?, ?, ?, ?)".format(table_name) conn = sqlite3.connect(self._url) try: cursor = conn.cursor() - values = (question, answer, answer_type, model, embedding_data) + values = (text, image_url, image_id, answer, answer_type, model) cursor.execute(insert_sql, values) conn.commit() id = cursor.lastrowid @@ -141,7 +128,7 @@ def insert_query_resp(self, query_resp, **kwargs): conn.close() def get_data_by_id(self, key: int): - table_name = "modelcache_llm_answer" + table_name = "open_cache_mm_answer" query_sql = "select question, answer, embedding_data, model from {} where id={}".format(table_name, key) conn = sqlite3.connect(self._url) try: @@ -160,7 +147,7 @@ def get_data_by_id(self, key: int): return None def update_hit_count_by_id(self, primary_id: int): - table_name = "modelcache_llm_answer" + table_name = "open_cache_mm_answer" update_sql = "UPDATE {} SET hit_count = hit_count+1 WHERE id={}".format(table_name, primary_id) conn = sqlite3.connect(self._url) @@ -178,7 +165,7 @@ def get_ids(self, deleted=True): pass def mark_deleted(self, keys): - table_name = "modelcache_llm_answer" + table_name = "open_cache_mm_answer" delete_sql = "Delete from {} WHERE id in ({})".format(table_name, ",".join([str(i) for i in keys])) conn = sqlite3.connect(self._url) try: @@ -193,7 +180,7 @@ def mark_deleted(self, keys): return delete_count def model_deleted(self, model_name): - table_name = "modelcache_llm_answer" + table_name = "open_cache_mm_answer" delete_sql = "Delete from {} WHERE model='{}'".format(table_name, model_name) conn = sqlite3.connect(self._url) try: diff --git a/modelcache_mm/manager/vector_data/faiss.py b/modelcache_mm/manager/vector_data/faiss.py new file mode 100644 index 0000000..4e34b46 --- /dev/null +++ b/modelcache_mm/manager/vector_data/faiss.py @@ -0,0 +1,75 @@ +# -*- coding: utf-8 -*- +import os +from typing import List +import numpy as np +from modelcache_mm.manager.vector_data.base import VectorBase, VectorData +from modelcache_mm.utils import import_faiss +import_faiss() +import faiss # pylint: disable=C0413 + + +class Faiss(VectorBase): + def __init__(self, + index_file_path, + dimension: int = 0, + top_k: int = 1 + ): + self._dimension = dimension + self._index_file_path = index_file_path + self._index = faiss.index_factory(self._dimension, "IDMap,Flat", faiss.METRIC_L2) + self._top_k = top_k + if os.path.isfile(index_file_path): + self._index = faiss.read_index(index_file_path) + + def add(self, datas: List[VectorData], model=None, mm_type=None): + data_array, id_array = map(list, zip(*((data.data, data.id) for data in datas))) + np_data = np.array(data_array).astype("float32") + ids = np.array(id_array) + print('insert_np_data: {}'.format(np_data)) + print('insert_np_data: {}'.format(np_data.shape)) + self._index.add_with_ids(np_data, ids) + + def search(self, data: np.ndarray, top_k: int, model, mm_type='mm'): + if self._index.ntotal == 0: + return None + if top_k 
== -1: + top_k = self._top_k + np_data = np.array(data).astype("float32").reshape(1, -1) + dist, ids = self._index.search(np_data, top_k) + ids = [int(i) for i in ids[0]] + return list(zip(dist[0], ids)) + + def rebuild_col(self, ids=None): + try: + self._index.reset() + except Exception as e: + return f"An error occurred during index rebuild: {e}" + + def rebuild(self, ids=None): + return True + + def delete(self, ids): + ids_to_remove = np.array(ids) + self._index.remove_ids(faiss.IDSelectorBatch(ids_to_remove.size, faiss.swig_ptr(ids_to_remove))) + + def create(self, model=None, mm_type=None): + pass + # collection_name_model = get_mm_index_name(model, mm_type) + # try: + # index_prefix = get_mm_index_prefix(model, mm_type) + # self.create_index(collection_name_model, mm_type, index_prefix) + # except Exception as e: + # raise ValueError(str(e)) + # return 'success' + + def flush(self): + faiss.write_index(self._index, self._index_file_path) + + def close(self): + self.flush() + + def rebuild_idx(self, model): + pass + + def count(self): + return self._index.ntotal diff --git a/modelcache_mm/manager/vector_data/manager.py b/modelcache_mm/manager/vector_data/manager.py index 1c161cf..0bf09be 100644 --- a/modelcache_mm/manager/vector_data/manager.py +++ b/modelcache_mm/manager/vector_data/manager.py @@ -98,36 +98,15 @@ def get(name, **kwargs): t_dimension=t_dimension, ) elif name == "faiss": - from modelcache.manager.vector_data.faiss import Faiss - + from modelcache_mm.manager.vector_data.faiss import Faiss dimension = kwargs.get("dimension", DIMENSION) - index_path = kwargs.pop("index_path", FAISS_INDEX_PATH) VectorBase.check_dimension(dimension) - vector_base = Faiss( - index_file_path=index_path, dimension=dimension, top_k=top_k - ) - elif name == "chromadb": - from modelcache.manager.vector_data.chroma import Chromadb - - client_settings = kwargs.get("client_settings", None) - persist_directory = kwargs.get("persist_directory", None) - collection_name = kwargs.get("collection_name", COLLECTION_NAME) - vector_base = Chromadb( - client_settings=client_settings, - persist_directory=persist_directory, - collection_name=collection_name, - top_k=top_k, - ) - elif name == "hnswlib": - from modelcache.manager.vector_data.hnswlib_store import Hnswlib - dimension = kwargs.get("dimension", DIMENSION) - index_path = kwargs.pop("index_path", "./hnswlib_index.bin") - max_elements = kwargs.pop("max_elements", 100000) - VectorBase.check_dimension(dimension) - vector_base = Hnswlib( - index_file_path=index_path, dimension=dimension, - top_k=top_k, max_elements=max_elements + index_path = kwargs.pop("index_path", FAISS_INDEX_PATH) + vector_base = Faiss( + index_file_path=index_path, + dimension=dimension, + top_k=top_k ) else: raise NotFoundError("vector store", name) From ebdc47114fe9392733bf03ac3e3dd9876e7a3c55 Mon Sep 17 00:00:00 2001 From: fuhui Date: Fri, 13 Sep 2024 15:10:20 +0800 Subject: [PATCH 57/98] fix invalid parameter default value --- modelcache/adapter/adapter.py | 23 +++++++++++--------- modelcache/adapter/adapter_query.py | 3 --- modelcache/embedding/data2vec.py | 13 +++++------ modelcache_mm/manager/vector_data/faiss.py | 2 -- modelcache_mm/manager/vector_data/manager.py | 2 +- 5 files changed, 20 insertions(+), 23 deletions(-) diff --git a/modelcache/adapter/adapter.py b/modelcache/adapter/adapter.py index 1428da2..5eba2ea 100644 --- a/modelcache/adapter/adapter.py +++ b/modelcache/adapter/adapter.py @@ -1,14 +1,12 @@ # -*- coding: utf-8 -*- import logging - -import openai from 
modelcache.adapter.adapter_query import adapt_query from modelcache.adapter.adapter_insert import adapt_insert from modelcache.adapter.adapter_remove import adapt_remove from modelcache.adapter.adapter_register import adapt_register -class ChatCompletion(openai.ChatCompletion): +class ChatCompletion(object): """Openai ChatCompletion Wrapper""" @classmethod @@ -26,13 +24,18 @@ def cache_data_convert(cache_data, cache_query): @classmethod def create_insert(cls, *args, **kwargs): - try: - return adapt_insert( - *args, - **kwargs - ) - except Exception as e: - return str(e) + # try: + # return adapt_insert( + # *args, + # **kwargs + # ) + # except Exception as e: + # return str(e) + + return adapt_insert( + *args, + **kwargs + ) @classmethod def create_remove(cls, *args, **kwargs): diff --git a/modelcache/adapter/adapter_query.py b/modelcache/adapter/adapter_query.py index 8f76bff..a8c90f1 100644 --- a/modelcache/adapter/adapter_query.py +++ b/modelcache/adapter/adapter_query.py @@ -22,14 +22,12 @@ def adapt_query(cache_data_convert, *args, **kwargs): extra_param=context.get("pre_embedding_func", None), prompts=chat_cache.config.prompts, ) - if cache_enable: embedding_data = time_cal( chat_cache.embedding_func, func_name="embedding", report_func=chat_cache.report.embedding, )(pre_embedding_data) - if cache_enable: cache_data_list = time_cal( chat_cache.data_manager.search, @@ -64,7 +62,6 @@ def adapt_query(cache_data_convert, *args, **kwargs): if rank_threshold_long < min_rank else rank_threshold_long ) - if cache_data_list is None or len(cache_data_list) == 0: rank_pre = -1.0 else: diff --git a/modelcache/embedding/data2vec.py b/modelcache/embedding/data2vec.py index 3f450fb..8c1ab2f 100644 --- a/modelcache/embedding/data2vec.py +++ b/modelcache/embedding/data2vec.py @@ -14,24 +14,23 @@ def mean_pooling(model_output, attention_mask): class Data2VecAudio(BaseEmbedding): - def __init__(self, model: str = "model/text2vec-base-chinese/"): + def __init__(self, model): current_dir = os.path.dirname(os.path.abspath(__file__)) parent_dir = os.path.dirname(current_dir) model_dir = os.path.dirname(parent_dir) - model = os.path.join(model_dir, model) + model_path = os.path.join(model_dir, model) + + self.device = 'cuda' if torch.cuda.is_available() else 'cpu' + self.tokenizer = BertTokenizer.from_pretrained(model_path, local_files_only=True) + self.model = BertModel.from_pretrained(model_path, local_files_only=True) try: self.__dimension = self.model.config.hidden_size except Exception: from transformers import AutoConfig - config = AutoConfig.from_pretrained(model) self.__dimension = config.hidden_size - self.device = 'cuda' if torch.cuda.is_available() else 'cpu' - self.tokenizer = BertTokenizer.from_pretrained(model, local_files_only=True) - self.model = BertModel.from_pretrained(model, local_files_only=True) - def to_embeddings(self, data, **_): encoded_input = self.tokenizer(data, padding=True, truncation=True, return_tensors='pt') num_tokens = sum(map(len, encoded_input['input_ids'])) diff --git a/modelcache_mm/manager/vector_data/faiss.py b/modelcache_mm/manager/vector_data/faiss.py index 4e34b46..75b2d3a 100644 --- a/modelcache_mm/manager/vector_data/faiss.py +++ b/modelcache_mm/manager/vector_data/faiss.py @@ -25,8 +25,6 @@ def add(self, datas: List[VectorData], model=None, mm_type=None): data_array, id_array = map(list, zip(*((data.data, data.id) for data in datas))) np_data = np.array(data_array).astype("float32") ids = np.array(id_array) - print('insert_np_data: {}'.format(np_data)) - 
print('insert_np_data: {}'.format(np_data.shape)) self._index.add_with_ids(np_data, ids) def search(self, data: np.ndarray, top_k: int, model, mm_type='mm'): diff --git a/modelcache_mm/manager/vector_data/manager.py b/modelcache_mm/manager/vector_data/manager.py index 0bf09be..c10decc 100644 --- a/modelcache_mm/manager/vector_data/manager.py +++ b/modelcache_mm/manager/vector_data/manager.py @@ -2,7 +2,7 @@ from modelcache_mm.utils.error import NotFoundError, ParamError TOP_K = 1 -FAISS_INDEX_PATH = "faiss.index" +FAISS_INDEX_PATH = "mm_faiss.index" DIMENSION = 0 MILVUS_HOST = "localhost" MILVUS_PORT = 19530 From a2c63903330407802708cdf84c3f332ccf57f10e Mon Sep 17 00:00:00 2001 From: fuhui Date: Fri, 13 Sep 2024 15:17:10 +0800 Subject: [PATCH 58/98] fix vecotr_data/redis.py --- modelcache/adapter/adapter.py | 19 ++--- modelcache/manager/vector_data/redis.py | 103 ------------------------ 2 files changed, 7 insertions(+), 115 deletions(-) diff --git a/modelcache/adapter/adapter.py b/modelcache/adapter/adapter.py index 5eba2ea..452be7b 100644 --- a/modelcache/adapter/adapter.py +++ b/modelcache/adapter/adapter.py @@ -24,18 +24,13 @@ def cache_data_convert(cache_data, cache_query): @classmethod def create_insert(cls, *args, **kwargs): - # try: - # return adapt_insert( - # *args, - # **kwargs - # ) - # except Exception as e: - # return str(e) - - return adapt_insert( - *args, - **kwargs - ) + try: + return adapt_insert( + *args, + **kwargs + ) + except Exception as e: + return str(e) @classmethod def create_remove(cls, *args, **kwargs): diff --git a/modelcache/manager/vector_data/redis.py b/modelcache/manager/vector_data/redis.py index 05618bc..afa1088 100644 --- a/modelcache/manager/vector_data/redis.py +++ b/modelcache/manager/vector_data/redis.py @@ -1,21 +1,5 @@ # -*- coding: utf-8 -*- from typing import List -<<<<<<< HEAD - -import numpy as np -from modelcache.manager.vector_data.base import VectorBase, VectorData -from modelcache.utils import import_redis -from redis.commands.search.query import Query -from redis.commands.search.indexDefinition import IndexDefinition, IndexType -from modelcache.utils.log import modelcache_log - -import_redis() -# -# from redis.commands.search.indexDefinition import IndexDefinition, IndexType -# from redis.commands.search.query import Query -# from redis.commands.search.field import TagField, VectorField -# from redis.client import Redis -======= import numpy as np from redis.commands.search.indexDefinition import IndexDefinition, IndexType from redis.commands.search.query import Query @@ -28,7 +12,6 @@ from modelcache.utils.index_util import get_index_name from modelcache.utils.index_util import get_index_prefix import_redis() ->>>>>>> main class RedisVectorStore(VectorBase): @@ -39,12 +22,6 @@ def __init__( username: str = "", password: str = "", dimension: int = 0, -<<<<<<< HEAD - collection_name: str = "gptcache", - top_k: int = 1, - namespace: str = "", - ): -======= top_k: int = 1, namespace: str = "", ): @@ -52,90 +29,18 @@ def __init__( raise ValueError( f"invalid `dim` param: {dimension} in the Redis vector store." 
) ->>>>>>> main self._client = Redis( host=host, port=int(port), username=username, password=password ) self.top_k = top_k self.dimension = dimension -<<<<<<< HEAD - self.collection_name = collection_name - self.namespace = namespace - self.doc_prefix = f"{self.namespace}doc:" # Prefix with the specified namespace - self._create_collection(collection_name) -======= self.namespace = namespace self.doc_prefix = f"{self.namespace}doc:" ->>>>>>> main def _check_index_exists(self, index_name: str) -> bool: """Check if Redis index exists.""" try: self._client.ft(index_name).info() -<<<<<<< HEAD - except: # pylint: disable=W0702 - gptcache_log.info("Index does not exist") - return False - gptcache_log.info("Index already exists") - return True - - def _create_collection(self, collection_name): - if self._check_index_exists(collection_name): - gptcache_log.info( - "The %s already exists, and it will be used directly", collection_name - ) - else: - schema = ( - TagField("tag"), # Tag Field Name - VectorField( - "vector", # Vector Field Name - "FLAT", - { # Vector Index Type: FLAT or HNSW - "TYPE": "FLOAT32", # FLOAT32 or FLOAT64 - "DIM": self.dimension, # Number of Vector Dimensions - "DISTANCE_METRIC": "COSINE", # Vector Search Distance Metric - }, - ), - ) - definition = IndexDefinition( - prefix=[self.doc_prefix], index_type=IndexType.HASH - ) - - # create Index - self._client.ft(collection_name).create_index( - fields=schema, definition=definition - ) - - def mul_add(self, datas: List[VectorData]): - pipe = self._client.pipeline() - - for data in datas: - key: int = data.id - obj = { - "vector": data.data.astype(np.float32).tobytes(), - } - pipe.hset(f"{self.doc_prefix}{key}", mapping=obj) - - pipe.execute() - - def search(self, data: np.ndarray, top_k: int = -1): - query = ( - Query( - f"*=>[KNN {top_k if top_k > 0 else self.top_k} @vector $vec as score]" - ) - .sort_by("score") - .return_fields("id", "score") - .paging(0, top_k if top_k > 0 else self.top_k) - .dialect(2) - ) - query_params = {"vec": data.astype(np.float32).tobytes()} - results = ( - self._client.ft(self.collection_name) - .search(query, query_params=query_params) - .docs - ) - return [(float(result.score), int(result.id[len(self.doc_prefix):])) for result in results] -======= except: modelcache_log.info("Index does not exist") return False @@ -201,13 +106,10 @@ def search(self, data: np.ndarray, top_k: int = -1, model=None): .docs ) return [(float(result.distance), int(getattr(result, id_field_name))) for result in results] ->>>>>>> main def rebuild(self, ids=None) -> bool: pass -<<<<<<< HEAD -======= def rebuild_col(self, model): index_name_model = get_index_name(model) if self._check_index_exists(index_name_model): @@ -222,14 +124,10 @@ def rebuild_col(self, model): raise ValueError(str(e)) # return 'rebuild success' ->>>>>>> main def delete(self, ids) -> None: pipe = self._client.pipeline() for data_id in ids: pipe.delete(f"{self.doc_prefix}{data_id}") -<<<<<<< HEAD - pipe.execute() -======= pipe.execute() def create(self, model=None): @@ -239,4 +137,3 @@ def create(self, model=None): def get_index_by_name(self, index_name): pass ->>>>>>> main From 2b8d640fedc9dcd98312794c45702bb92f383e9c Mon Sep 17 00:00:00 2001 From: Hongen Peng Date: Fri, 13 Sep 2024 17:37:15 +0800 Subject: [PATCH 59/98] Update sql_storage.py table name update to "modelcache_llm_answer" --- modelcache/manager/scalar_data/sql_storage.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/modelcache/manager/scalar_data/sql_storage.py 
b/modelcache/manager/scalar_data/sql_storage.py index eb776c1..845d7f7 100644 --- a/modelcache/manager/scalar_data/sql_storage.py +++ b/modelcache/manager/scalar_data/sql_storage.py @@ -43,7 +43,7 @@ def _insert(self, data: List): embedding_data = embedding_data.tobytes() is_deleted = 0 - table_name = "cache_codegpt_answer" + table_name = "modelcache_llm_answer" insert_sql = "INSERT INTO {} (question, answer, answer_type, model, embedding_data, is_deleted) VALUES (%s, %s, %s, %s, _binary%s, %s)".format(table_name) conn = self.pool.connection() try: @@ -91,7 +91,7 @@ def insert_query_resp(self, query_resp, **kwargs): conn.close() def get_data_by_id(self, key: int): - table_name = "cache_codegpt_answer" + table_name = "modelcache_llm_answer" query_sql = "select question, answer, embedding_data, model from {} where id={}".format(table_name, key) conn_start = time.time() conn = self.pool.connection() @@ -112,7 +112,7 @@ def get_data_by_id(self, key: int): return None def update_hit_count_by_id(self, primary_id: int): - table_name = "cache_codegpt_answer" + table_name = "modelcache_llm_answer" update_sql = "UPDATE {} SET hit_count = hit_count+1 WHERE id={}".format(table_name, primary_id) conn = self.pool.connection() From 554656797f716ae2e98c6d3bb00a0f2643a7ad4e Mon Sep 17 00:00:00 2001 From: Hongen Peng Date: Fri, 13 Sep 2024 17:38:33 +0800 Subject: [PATCH 60/98] Update sql_storage.py table name update to "modelcache_llm_answer" --- modelcache/manager/scalar_data/sql_storage.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/modelcache/manager/scalar_data/sql_storage.py b/modelcache/manager/scalar_data/sql_storage.py index 845d7f7..52fd559 100644 --- a/modelcache/manager/scalar_data/sql_storage.py +++ b/modelcache/manager/scalar_data/sql_storage.py @@ -127,7 +127,7 @@ def update_hit_count_by_id(self, primary_id: int): conn.close() def get_ids(self, deleted=True): - table_name = "cache_codegpt_answer" + table_name = "modelcache_llm_answer" state = 1 if deleted else 0 query_sql = "Select id FROM {} WHERE is_deleted = {}".format(table_name, state) @@ -142,7 +142,7 @@ def get_ids(self, deleted=True): return ids def mark_deleted(self, keys): - table_name = "cache_codegpt_answer" + table_name = "modelcache_llm_answer" mark_sql = " update {} set is_deleted=1 WHERE id in ({})".format(table_name, ",".join([str(i) for i in keys])) # 从连接池中获取连接 @@ -159,7 +159,7 @@ def mark_deleted(self, keys): return delete_count def model_deleted(self, model_name): - table_name = "cache_codegpt_answer" + table_name = "modelcache_llm_answer" delete_sql = "Delete from {} WHERE model='{}'".format(table_name, model_name) table_log_name = "modelcache_query_log" @@ -181,7 +181,7 @@ def model_deleted(self, model_name): return resp def clear_deleted_data(self): - table_name = "cache_codegpt_answer" + table_name = "modelcache_llm_answer" delete_sql = "DELETE FROM {} WHERE is_deleted = 1".format(table_name) conn = self.pool.connection() @@ -196,7 +196,7 @@ def clear_deleted_data(self): return delete_count def count(self, state: int = 0, is_all: bool = False): - table_name = "cache_codegpt_answer" + table_name = "modelcache_llm_answer" if is_all: count_sql = "SELECT COUNT(*) FROM {}".format(table_name) else: From dd9093c61bc29feebb1ca00143513df52d54b057 Mon Sep 17 00:00:00 2001 From: Hongen Peng Date: Fri, 13 Sep 2024 17:39:45 +0800 Subject: [PATCH 61/98] Update create_table.sql table name update to "modelcache_llm_answer" --- reference_doc/create_table.sql | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/reference_doc/create_table.sql b/reference_doc/create_table.sql index 27c0beb..1d0f30a 100644 --- a/reference_doc/create_table.sql +++ b/reference_doc/create_table.sql @@ -1,4 +1,4 @@ -CREATE TABLE `cache_codegpt_answer` ( +CREATE TABLE `modelcache_llm_answer` ( `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT comment '主键', `gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP comment '创建时间', `gmt_modified` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP comment '修改时间', From b6c4260134fd6ba88bb454f0ecf8328a119261f2 Mon Sep 17 00:00:00 2001 From: fuhui Date: Tue, 22 Oct 2024 10:23:36 +0800 Subject: [PATCH 62/98] add news in readme --- README_CN.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README_CN.md b/README_CN.md index 646c803..5959380 100644 --- a/README_CN.md +++ b/README_CN.md @@ -25,6 +25,7 @@ ModelCache - [致谢](#致谢) - [Contributing](#Contributing) ## 新闻 +- 🔥🔥[2024.10.22] 增加1024程序员节任务。 - 🔥🔥[2024.04.09] 增加了多租户场景中Redis Search存储和检索embedding的能力,可以将Cache和向量数据库的交互耗时降低至10ms内。 - 🔥🔥[2023.12.10] 增加llmEmb、onnx、paddlenlp、fasttext等LLM embedding框架,并增加timm 图片embedding框架,用于提供更丰富的embedding能力。 - 🔥🔥[2023.11.20] codefuse-ModelCache增加本地存储能力, 适配了嵌入式数据库sqlite、faiss,方便用户快速启动测试。 From fc003deea0fd1c88eb75e337fcc87b5139b841c3 Mon Sep 17 00:00:00 2001 From: charleschile Date: Sat, 16 Nov 2024 19:38:48 +0800 Subject: [PATCH 63/98] fastapi init --- fastapi4modelcache.py | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 fastapi4modelcache.py diff --git a/fastapi4modelcache.py b/fastapi4modelcache.py new file mode 100644 index 0000000..ae9058c --- /dev/null +++ b/fastapi4modelcache.py @@ -0,0 +1,3 @@ +import uvicorn +import asyncio +from fastapi import FastAPI, Request \ No newline at end of file From 642e1ca85fbd17c3008dd4a60d6a972af7fd4be5 Mon Sep 17 00:00:00 2001 From: charleschile Date: Sat, 16 Nov 2024 21:33:17 +0800 Subject: [PATCH 64/98] add fastapi4modelcache --- fastapi4modelcache.py | 151 +++++++++++++++++++++++++++++++++++++++++- 1 file changed, 150 insertions(+), 1 deletion(-) diff --git a/fastapi4modelcache.py b/fastapi4modelcache.py index ae9058c..58374c1 100644 --- a/fastapi4modelcache.py +++ b/fastapi4modelcache.py @@ -1,3 +1,152 @@ +# -*- coding: utf-8 -*- +import time import uvicorn import asyncio -from fastapi import FastAPI, Request \ No newline at end of file +import logging +import configparser +import json +from fastapi import FastAPI, Request, HTTPException +from pydantic import BaseModel +from concurrent.futures import ThreadPoolExecutor +from starlette.responses import PlainTextResponse + +from modelcache import cache +from modelcache.adapter import adapter +from modelcache.manager import CacheBase, VectorBase, get_data_manager +from modelcache.similarity_evaluation.distance import SearchDistanceEvaluation +from modelcache.processor.pre import query_multi_splicing +from modelcache.processor.pre import insert_multi_splicing +from modelcache.utils.model_filter import model_blacklist_filter +from modelcache.embedding import Data2VecAudio + +#创建一个FastAPI实例 +app = FastAPI() + +class RequestData(BaseModel): + type: str + scope: dict = None + query: str = None + chat_info: dict = None + remove_type: str = None + id_list: list = [] + +data2vec = Data2VecAudio() +mysql_config = configparser.ConfigParser() +mysql_config.read('modelcache/config/mysql_config.ini') + +milvus_config = configparser.ConfigParser() +milvus_config.read('modelcache/config/milvus_config.ini') + +# redis_config = configparser.ConfigParser() +# 
redis_config.read('modelcache/config/redis_config.ini') + +# 初始化datamanager +data_manager = get_data_manager( + CacheBase("mysql", config=mysql_config), + VectorBase("milvus", dimension=data2vec.dimension, milvus_config=milvus_config) +) + +# # 使用redis初始化datamanager +# data_manager = get_data_manager( +# CacheBase("mysql", config=mysql_config), +# VectorBase("redis", dimension=data2vec.dimension, redis_config=redis_config) +# ) + +cache.init( + embedding_func=data2vec.to_embeddings, + data_manager=data_manager, + similarity_evaluation=SearchDistanceEvaluation(), + query_pre_embedding_func=query_multi_splicing, + insert_pre_embedding_func=insert_multi_splicing, +) + +executor = ThreadPoolExecutor(max_workers=6) + +# 异步保存查询信息 +async def save_query_info(result, model, query, delta_time_log): + loop = asyncio.get_running_loop() + await loop.run_in_executor(executor, cache.data_manager.save_query_resp, result, model, json.dumps(query, ensure_ascii=False), delta_time_log) + + + +@app.get("/welcome", response_class=PlainTextResponse) +async def first_fastapi(): + return "hello, modelcache!" + +@app.post("/modelcache") +async def user_backend(request_data: RequestData): + # param parsing + try: + request_type = request_data.type + model = None + if request_data.scope: + model = request_data.scope.get('model', '').replace('-','_').replace('.', '_') + query = request_data.query + chat_info = request_data.chat_info + + except Exception as e: + result = {"errorCode": 103, "errorDesc": str(e), "cacheHit": False, "delta_time": 0, "hit_query": '', "answer": ''} + return result + + # model filter + filter_resp = model_blacklist_filter(model, request_type) + if isinstance(filter_resp, dict): + return filter_resp + + if request_type == 'query': + try: + start_time = time.time() + response = adapter.ChatCompletion.create_query(scope={"model": model}, query=query) + delta_time = f"{round(time.time() - start_time, 2)}s" + + if response is None: + result = {"errorCode": 0, "errorDesc": '', "cacheHit": False, "delta_time": delta_time, "hit_query": '', "answer": ''} + # elif response in ['adapt_query_exception']: + elif isinstance(response, str): + result = {"errorCode": 201, "errorDesc": response, "cacheHit": False, "delta_time": delta_time, + "hit_query": '', "answer": ''} + else: + answer = response['data'] + hit_query = response['hitQuery'] + result = {"errorCode": 0, "errorDesc": '', "cacheHit": True, "delta_time": delta_time, "hit_query": hit_query, "answer": answer} + + delta_time_log = round(time.time() - start_time, 2) + asyncio.create_task(save_query_info(result, model, query, delta_time_log)) + return result + except Exception as e: + result = {"errorCode": 202, "errorDesc": str(e), "cacheHit": False, "delta_time": 0, + "hit_query": '', "answer": ''} + logging.info(f'result: {str(result)}') + return result + + if request_type == 'insert': + try: + response = adapter.ChatCompletion.create_insert(model=model, chat_info=chat_info) + if response == 'success': + return {"errorCode": 0, "errorDesc": "", "writeStatus": "success"} + else: + return {"errorCode": 301, "errorDesc": response, "writeStatus": "exception"} + except Exception as e: + return {"errorCode": 303, "errorDesc": str(e), "writeStatus": "exception"} + + if request_type == 'remove': + response = adapter.ChatCompletion.create_remove(model=model, remove_type=request_data.remove_type, id_list=request_data.id_list) + if not isinstance(response, dict): + return {"errorCode": 401, "errorDesc": "", "response": response, "removeStatus": "exception"} + 
+ state = response.get('status') + if state == 'success': + return {"errorCode": 0, "errorDesc": "", "response": response, "writeStatus": "success"} + else: + return {"errorCode": 402, "errorDesc": "", "response": response, "writeStatus": "exception"} + + if request_type == 'register': + response = adapter.ChatCompletion.create_register(model=model) + if response in ['create_success', 'already_exists']: + return {"errorCode": 0, "errorDesc": "", "response": response, "writeStatus": "success"} + else: + return {"errorCode": 502, "errorDesc": "", "response": response, "writeStatus": "exception"} + +# TODO: 可以修改为在命令行中使用`uvicorn your_module_name:app --host 0.0.0.0 --port 5000 --reload`的命令启动 +if __name__ == '__main__': + uvicorn.run(app, host='0.0.0.0', port=5001) \ No newline at end of file From 72018ce28cfce262ee4dad4c1db0ee5d8f2f56c7 Mon Sep 17 00:00:00 2001 From: charleschile Date: Wed, 20 Nov 2024 13:17:55 +0800 Subject: [PATCH 65/98] fastapi for modelcache_demo --- fastapi4modelcache_demo.py | 162 ++++++++++++++++++ .../manager/scalar_data/sql_storage_sqlite.py | 3 +- 2 files changed, 163 insertions(+), 2 deletions(-) create mode 100644 fastapi4modelcache_demo.py diff --git a/fastapi4modelcache_demo.py b/fastapi4modelcache_demo.py new file mode 100644 index 0000000..624fc75 --- /dev/null +++ b/fastapi4modelcache_demo.py @@ -0,0 +1,162 @@ +# -*- coding: utf-8 -*- +import time +import uvicorn +import asyncio +import logging +# import configparser +import json +from fastapi import FastAPI, Request, HTTPException +from pydantic import BaseModel +from concurrent.futures import ThreadPoolExecutor +from starlette.responses import PlainTextResponse +import functools + +from modelcache import cache +from modelcache.adapter import adapter +from modelcache.manager import CacheBase, VectorBase, get_data_manager +from modelcache.similarity_evaluation.distance import SearchDistanceEvaluation +from modelcache.processor.pre import query_multi_splicing +from modelcache.processor.pre import insert_multi_splicing +from modelcache.utils.model_filter import model_blacklist_filter +from modelcache.embedding import Data2VecAudio + +# 创建一个FastAPI实例 +app = FastAPI() + +class RequestData(BaseModel): + type: str + scope: dict = None + query: str = None + chat_info: list = None + remove_type: str = None + id_list: list = [] + +data2vec = Data2VecAudio() + +data_manager = get_data_manager(CacheBase("sqlite"), VectorBase("faiss", dimension=data2vec.dimension)) + +cache.init( + embedding_func=data2vec.to_embeddings, + data_manager=data_manager, + similarity_evaluation=SearchDistanceEvaluation(), + query_pre_embedding_func=query_multi_splicing, + insert_pre_embedding_func=insert_multi_splicing, +) + +executor = ThreadPoolExecutor(max_workers=6) + +# 异步保存查询信息 +async def save_query_info_fastapi(result, model, query, delta_time_log): + loop = asyncio.get_running_loop() + func = functools.partial(cache.data_manager.save_query_resp, result, model=model, query=json.dumps(query, ensure_ascii=False), delta_time=delta_time_log) + await loop.run_in_executor(None, func) + + + +@app.get("/welcome", response_class=PlainTextResponse) +async def first_fastapi(): + return "hello, modelcache!" 
+ +@app.post("/modelcache") +async def user_backend(request: Request): + try: + raw_body = await request.body() + # 解析字符串为JSON对象 + if isinstance(raw_body, bytes): + raw_body = raw_body.decode("utf-8") + if isinstance(raw_body, str): + try: + # 尝试将字符串解析为JSON对象 + request_data = json.loads(raw_body) + except json.JSONDecodeError: + # 如果无法解析,返回格式错误 + raise HTTPException(status_code=400, detail="Invalid JSON format") + else: + request_data = raw_body + + # 确保request_data是字典对象 + if isinstance(request_data, str): + try: + request_data = json.loads(request_data) + except json.JSONDecodeError: + raise HTTPException(status_code=400, detail="Invalid JSON format") + + request_type = request_data.get('type') + model = None + if 'scope' in request_data: + model = request_data['scope'].get('model', '').replace('-', '_').replace('.', '_') + query = request_data.get('query') + chat_info = request_data.get('chat_info') + + if not request_type or request_type not in ['query', 'insert', 'remove', 'detox']: + raise HTTPException(status_code=400, detail="Type exception, should be one of ['query', 'insert', 'remove', 'detox']") + + except Exception as e: + request_data = raw_body if 'raw_body' in locals() else None + result = { + "errorCode": 103, + "errorDesc": str(e), + "cacheHit": False, + "delta_time": 0, + "hit_query": '', + "answer": '', + "para_dict": request_data + } + return result + + + # model filter + filter_resp = model_blacklist_filter(model, request_type) + if isinstance(filter_resp, dict): + return filter_resp + + if request_type == 'query': + try: + start_time = time.time() + response = adapter.ChatCompletion.create_query(scope={"model": model}, query=query) + delta_time = f"{round(time.time() - start_time, 2)}s" + + if response is None: + result = {"errorCode": 0, "errorDesc": '', "cacheHit": False, "delta_time": delta_time, "hit_query": '', "answer": ''} + elif response in ['adapt_query_exception']: + # elif isinstance(response, str): + result = {"errorCode": 201, "errorDesc": response, "cacheHit": False, "delta_time": delta_time, + "hit_query": '', "answer": ''} + else: + answer = response['data'] + hit_query = response['hitQuery'] + result = {"errorCode": 0, "errorDesc": '', "cacheHit": True, "delta_time": delta_time, "hit_query": hit_query, "answer": answer} + + delta_time_log = round(time.time() - start_time, 2) + asyncio.create_task(save_query_info_fastapi(result, model, query, delta_time_log)) + return result + except Exception as e: + result = {"errorCode": 202, "errorDesc": str(e), "cacheHit": False, "delta_time": 0, + "hit_query": '', "answer": ''} + logging.info(f'result: {str(result)}') + return result + + if request_type == 'insert': + try: + response = adapter.ChatCompletion.create_insert(model=model, chat_info=chat_info) + if response == 'success': + return {"errorCode": 0, "errorDesc": "", "writeStatus": "success"} + else: + return {"errorCode": 301, "errorDesc": response, "writeStatus": "exception"} + except Exception as e: + return {"errorCode": 303, "errorDesc": str(e), "writeStatus": "exception"} + + if request_type == 'remove': + response = adapter.ChatCompletion.create_remove(model=model, remove_type=request_data.get("remove_type"), id_list=request_data.get("id_list")) + if not isinstance(response, dict): + return {"errorCode": 401, "errorDesc": "", "response": response, "removeStatus": "exception"} + + state = response.get('status') + if state == 'success': + return {"errorCode": 0, "errorDesc": "", "response": response, "writeStatus": "success"} + else: + return 
{"errorCode": 402, "errorDesc": "", "response": response, "writeStatus": "exception"} + +# TODO: 可以修改为在命令行中使用`uvicorn your_module_name:app --host 0.0.0.0 --port 5000 --reload`的命令启动 +if __name__ == '__main__': + uvicorn.run(app, host='0.0.0.0', port=5000) \ No newline at end of file diff --git a/modelcache/manager/scalar_data/sql_storage_sqlite.py b/modelcache/manager/scalar_data/sql_storage_sqlite.py index 3c68f9e..b7463c5 100644 --- a/modelcache/manager/scalar_data/sql_storage_sqlite.py +++ b/modelcache/manager/scalar_data/sql_storage_sqlite.py @@ -100,8 +100,7 @@ def insert_query_resp(self, query_resp, **kwargs): hit_query = json.dumps(hit_query, ensure_ascii=False) table_name = "modelcache_query_log" - insert_sql = "INSERT INTO {} (error_code, error_desc, cache_hit, model, query, delta_time, hit_query, answer) VALUES (%s, %s, %s, %s, %s, %s, %s, %s)".format(table_name) - + insert_sql = "INSERT INTO {} (error_code, error_desc, cache_hit, model, query, delta_time, hit_query, answer) VALUES (?, ?, ?, ?, ?, ?, ?, ?)".format(table_name) conn = sqlite3.connect(self._url) try: cursor = conn.cursor() From 3dbc951c9bd7390f66492e357a84914b101b0d96 Mon Sep 17 00:00:00 2001 From: charleschile Date: Wed, 20 Nov 2024 14:16:22 +0800 Subject: [PATCH 66/98] fastapi for modelcache --- examples/flask/llms_cache/data_insert.py | 1 + examples/flask/llms_cache/data_query.py | 1 + examples/flask/llms_cache/data_query_long.py | 1 + fastapi4modelcache.py | 62 +++++++++++++++----- 4 files changed, 51 insertions(+), 14 deletions(-) diff --git a/examples/flask/llms_cache/data_insert.py b/examples/flask/llms_cache/data_insert.py index 52d1405..e26bec7 100644 --- a/examples/flask/llms_cache/data_insert.py +++ b/examples/flask/llms_cache/data_insert.py @@ -14,6 +14,7 @@ def run(): res = requests.post(url, headers=headers, json=json.dumps(data)) res_text = res.text + print("data_insert:", res.status_code, res_text) if __name__ == '__main__': run() diff --git a/examples/flask/llms_cache/data_query.py b/examples/flask/llms_cache/data_query.py index aa59ae3..6fe8313 100644 --- a/examples/flask/llms_cache/data_query.py +++ b/examples/flask/llms_cache/data_query.py @@ -14,6 +14,7 @@ def run(): res = requests.post(url, headers=headers, json=json.dumps(data)) res_text = res.text + print("data_query:", res.status_code, res_text) if __name__ == '__main__': run() diff --git a/examples/flask/llms_cache/data_query_long.py b/examples/flask/llms_cache/data_query_long.py index e8c29f8..7aa4f14 100644 --- a/examples/flask/llms_cache/data_query_long.py +++ b/examples/flask/llms_cache/data_query_long.py @@ -19,6 +19,7 @@ def run(): res = requests.post(url, headers=headers, json=json.dumps(data)) res_text = res.text + print("data_query_long:", res.status_code, res_text) if __name__ == '__main__': run() diff --git a/fastapi4modelcache.py b/fastapi4modelcache.py index 58374c1..6e85612 100644 --- a/fastapi4modelcache.py +++ b/fastapi4modelcache.py @@ -9,6 +9,7 @@ from pydantic import BaseModel from concurrent.futures import ThreadPoolExecutor from starlette.responses import PlainTextResponse +import functools from modelcache import cache from modelcache.adapter import adapter @@ -63,9 +64,10 @@ class RequestData(BaseModel): executor = ThreadPoolExecutor(max_workers=6) # 异步保存查询信息 -async def save_query_info(result, model, query, delta_time_log): +async def save_query_info_fastapi(result, model, query, delta_time_log): loop = asyncio.get_running_loop() - await loop.run_in_executor(executor, cache.data_manager.save_query_resp, result, model, 
json.dumps(query, ensure_ascii=False), delta_time_log) + func = functools.partial(cache.data_manager.save_query_resp, result, model=model, query=json.dumps(query, ensure_ascii=False), delta_time=delta_time_log) + await loop.run_in_executor(None, func) @@ -74,20 +76,53 @@ async def first_fastapi(): return "hello, modelcache!" @app.post("/modelcache") -async def user_backend(request_data: RequestData): - # param parsing +async def user_backend(request: Request): try: - request_type = request_data.type + raw_body = await request.body() + # 解析字符串为JSON对象 + if isinstance(raw_body, bytes): + raw_body = raw_body.decode("utf-8") + if isinstance(raw_body, str): + try: + # 尝试将字符串解析为JSON对象 + request_data = json.loads(raw_body) + except json.JSONDecodeError: + # 如果无法解析,返回格式错误 + raise HTTPException(status_code=400, detail="Invalid JSON format") + else: + request_data = raw_body + + # 确保request_data是字典对象 + if isinstance(request_data, str): + try: + request_data = json.loads(request_data) + except json.JSONDecodeError: + raise HTTPException(status_code=400, detail="Invalid JSON format") + + request_type = request_data.get('type') model = None - if request_data.scope: - model = request_data.scope.get('model', '').replace('-','_').replace('.', '_') - query = request_data.query - chat_info = request_data.chat_info + if 'scope' in request_data: + model = request_data['scope'].get('model', '').replace('-', '_').replace('.', '_') + query = request_data.get('query') + chat_info = request_data.get('chat_info') + + if not request_type or request_type not in ['query', 'insert', 'remove', 'detox']: + raise HTTPException(status_code=400, detail="Type exception, should be one of ['query', 'insert', 'remove', 'detox']") except Exception as e: - result = {"errorCode": 103, "errorDesc": str(e), "cacheHit": False, "delta_time": 0, "hit_query": '', "answer": ''} + request_data = raw_body if 'raw_body' in locals() else None + result = { + "errorCode": 103, + "errorDesc": str(e), + "cacheHit": False, + "delta_time": 0, + "hit_query": '', + "answer": '', + "para_dict": request_data + } return result + # model filter filter_resp = model_blacklist_filter(model, request_type) if isinstance(filter_resp, dict): @@ -101,8 +136,7 @@ async def user_backend(request_data: RequestData): if response is None: result = {"errorCode": 0, "errorDesc": '', "cacheHit": False, "delta_time": delta_time, "hit_query": '', "answer": ''} - # elif response in ['adapt_query_exception']: - elif isinstance(response, str): + elif response in ['adapt_query_exception']: result = {"errorCode": 201, "errorDesc": response, "cacheHit": False, "delta_time": delta_time, "hit_query": '', "answer": ''} else: @@ -111,7 +145,7 @@ async def user_backend(request_data: RequestData): result = {"errorCode": 0, "errorDesc": '', "cacheHit": True, "delta_time": delta_time, "hit_query": hit_query, "answer": answer} delta_time_log = round(time.time() - start_time, 2) - asyncio.create_task(save_query_info(result, model, query, delta_time_log)) + asyncio.create_task(save_query_info_fastapi(result, model, query, delta_time_log)) return result except Exception as e: result = {"errorCode": 202, "errorDesc": str(e), "cacheHit": False, "delta_time": 0, @@ -130,7 +164,7 @@ async def user_backend(request_data: RequestData): return {"errorCode": 303, "errorDesc": str(e), "writeStatus": "exception"} if request_type == 'remove': - response = adapter.ChatCompletion.create_remove(model=model, remove_type=request_data.remove_type, id_list=request_data.id_list) + response = 
adapter.ChatCompletion.create_remove(model=model, remove_type=request_data.get("remove_type"), id_list=request_data.get("id_list")) if not isinstance(response, dict): return {"errorCode": 401, "errorDesc": "", "response": response, "removeStatus": "exception"} From e490aeeb08251485275163b7ecdeb6424f03e327 Mon Sep 17 00:00:00 2001 From: charleschile Date: Wed, 20 Nov 2024 16:08:38 +0800 Subject: [PATCH 67/98] fastapi requirementsd --- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index 78d682e..3a8d124 100644 --- a/requirements.txt +++ b/requirements.txt @@ -12,3 +12,4 @@ transformers==4.38.2 faiss-cpu==1.7.4 redis==5.0.1 modelscope==1.14.0 +fastapi==0.115.5 \ No newline at end of file From 850bbca988ca760b824c3250227d0e9801540121 Mon Sep 17 00:00:00 2001 From: charleschile Date: Wed, 20 Nov 2024 16:18:59 +0800 Subject: [PATCH 68/98] fix bug --- fastapi4modelcache.py | 2 +- requirements.txt | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/fastapi4modelcache.py b/fastapi4modelcache.py index 6e85612..34a6bdc 100644 --- a/fastapi4modelcache.py +++ b/fastapi4modelcache.py @@ -183,4 +183,4 @@ async def user_backend(request: Request): # TODO: 可以修改为在命令行中使用`uvicorn your_module_name:app --host 0.0.0.0 --port 5000 --reload`的命令启动 if __name__ == '__main__': - uvicorn.run(app, host='0.0.0.0', port=5001) \ No newline at end of file + uvicorn.run(app, host='0.0.0.0', port=5000) \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 3a8d124..95e1948 100644 --- a/requirements.txt +++ b/requirements.txt @@ -12,4 +12,5 @@ transformers==4.38.2 faiss-cpu==1.7.4 redis==5.0.1 modelscope==1.14.0 -fastapi==0.115.5 \ No newline at end of file +fastapi==0.115.5 +uvicorn==0.32.0 \ No newline at end of file From c000a42cd5d91e32608794a826c9a769ac0419ed Mon Sep 17 00:00:00 2001 From: charleschile Date: Wed, 20 Nov 2024 17:31:50 +0800 Subject: [PATCH 69/98] fix bug --- fastapi4modelcache.py | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/fastapi4modelcache.py b/fastapi4modelcache.py index 34a6bdc..f9d0267 100644 --- a/fastapi4modelcache.py +++ b/fastapi4modelcache.py @@ -64,7 +64,7 @@ class RequestData(BaseModel): executor = ThreadPoolExecutor(max_workers=6) # 异步保存查询信息 -async def save_query_info_fastapi(result, model, query, delta_time_log): +async def save_query_info(result, model, query, delta_time_log): loop = asyncio.get_running_loop() func = functools.partial(cache.data_manager.save_query_resp, result, model=model, query=json.dumps(query, ensure_ascii=False), delta_time=delta_time_log) await loop.run_in_executor(None, func) @@ -86,9 +86,12 @@ async def user_backend(request: Request): try: # 尝试将字符串解析为JSON对象 request_data = json.loads(raw_body) - except json.JSONDecodeError: + except json.JSONDecodeError as e: # 如果无法解析,返回格式错误 - raise HTTPException(status_code=400, detail="Invalid JSON format") + result = {"errorCode": 101, "errorDesc": str(e), "cacheHit": False, "delta_time": 0, "hit_query": '', + "answer": ''} + asyncio.create_task(save_query_info(result, model='', query='', delta_time_log=0)) + raise HTTPException(status_code=101, detail="Invalid JSON format") else: request_data = raw_body @@ -97,7 +100,7 @@ async def user_backend(request: Request): try: request_data = json.loads(request_data) except json.JSONDecodeError: - raise HTTPException(status_code=400, detail="Invalid JSON format") + raise HTTPException(status_code=101, detail="Invalid JSON format") request_type = 
request_data.get('type') model = None @@ -106,8 +109,12 @@ async def user_backend(request: Request): query = request_data.get('query') chat_info = request_data.get('chat_info') - if not request_type or request_type not in ['query', 'insert', 'remove', 'detox']: - raise HTTPException(status_code=400, detail="Type exception, should be one of ['query', 'insert', 'remove', 'detox']") + if not request_type or request_type not in ['query', 'insert', 'remove', 'register']: + result = {"errorCode": 102, + "errorDesc": "type exception, should one of ['query', 'insert', 'remove', 'register']", + "cacheHit": False, "delta_time": 0, "hit_query": '', "answer": ''} + asyncio.create_task(save_query_info(result, model=model, query='', delta_time_log=0)) + raise HTTPException(status_code=102, detail="Type exception, should be one of ['query', 'insert', 'remove', 'register']") except Exception as e: request_data = raw_body if 'raw_body' in locals() else None @@ -145,7 +152,7 @@ async def user_backend(request: Request): result = {"errorCode": 0, "errorDesc": '', "cacheHit": True, "delta_time": delta_time, "hit_query": hit_query, "answer": answer} delta_time_log = round(time.time() - start_time, 2) - asyncio.create_task(save_query_info_fastapi(result, model, query, delta_time_log)) + asyncio.create_task(save_query_info(result, model, query, delta_time_log)) return result except Exception as e: result = {"errorCode": 202, "errorDesc": str(e), "cacheHit": False, "delta_time": 0, From 2d692ae09c238589f29ab8233216407eca5d2c5d Mon Sep 17 00:00:00 2001 From: powerli2002 <845160705@qq.com> Date: Mon, 9 Dec 2024 22:09:47 +0800 Subject: [PATCH 70/98] Add feature : Using elasticsearch as a scalar database --- flask4modelcache.py | 5 +- modelcache/config/elasticsearch_config.ini | 5 + modelcache/manager/scalar_data/manager.py | 4 + .../manager/scalar_data/sql_storage_es.py | 195 ++++++++++++++++++ requirements.txt | 4 +- 5 files changed, 211 insertions(+), 2 deletions(-) create mode 100644 modelcache/config/elasticsearch_config.ini create mode 100644 modelcache/manager/scalar_data/sql_storage_es.py diff --git a/flask4modelcache.py b/flask4modelcache.py index ee5955b..3d15a68 100644 --- a/flask4modelcache.py +++ b/flask4modelcache.py @@ -38,11 +38,14 @@ def response_hitquery(cache_resp): milvus_config = configparser.ConfigParser() milvus_config.read('modelcache/config/milvus_config.ini') +es_config = configparser.ConfigParser() +es_config.read('modelcache/config/elasticsearch_config.ini') + # redis_config = configparser.ConfigParser() # redis_config.read('modelcache/config/redis_config.ini') -data_manager = get_data_manager(CacheBase("mysql", config=mysql_config), +data_manager = get_data_manager(CacheBase("elasticsearch", config=es_config), VectorBase("milvus", dimension=data2vec.dimension, milvus_config=milvus_config)) # data_manager = get_data_manager(CacheBase("mysql", config=mysql_config), diff --git a/modelcache/config/elasticsearch_config.ini b/modelcache/config/elasticsearch_config.ini new file mode 100644 index 0000000..ba87a5a --- /dev/null +++ b/modelcache/config/elasticsearch_config.ini @@ -0,0 +1,5 @@ +[elasticsearch] +host = '' +port = '' +user = '' +password = '' \ No newline at end of file diff --git a/modelcache/manager/scalar_data/manager.py b/modelcache/manager/scalar_data/manager.py index 4c02c45..8ff3aee 100644 --- a/modelcache/manager/scalar_data/manager.py +++ b/modelcache/manager/scalar_data/manager.py @@ -27,6 +27,10 @@ def get(name, **kwargs): from 
modelcache.manager.scalar_data.sql_storage_sqlite import SQLStorage sql_url = kwargs.get("sql_url", SQL_URL[name]) cache_base = SQLStorage(db_type=name, url=sql_url) + elif name == 'elasticsearch': + from modelcache.manager.scalar_data.sql_storage_es import SQLStorage + config = kwargs.get("config") + cache_base = SQLStorage(db_type=name, config=config) else: raise NotFoundError("cache store", name) return cache_base diff --git a/modelcache/manager/scalar_data/sql_storage_es.py b/modelcache/manager/scalar_data/sql_storage_es.py new file mode 100644 index 0000000..68bcb31 --- /dev/null +++ b/modelcache/manager/scalar_data/sql_storage_es.py @@ -0,0 +1,195 @@ +# -*- coding: utf-8 -*- +import json +from typing import List +from elasticsearch import Elasticsearch, helpers +from modelcache.manager.scalar_data.base import CacheStorage, CacheData +import time +from snowflake import SnowflakeGenerator + + +class SQLStorage(CacheStorage): + def __init__( + self, + db_type: str = "elasticsearch", + config=None + ): + self.host = config.get('elasticsearch', 'host') + self.port = int(config.get('elasticsearch', 'port')) + self.client = Elasticsearch( + hosts=[{"host": self.host, "port": self.port}], + timeout=30, + http_auth=('esuser', 'password') + ) + + self.log_index = "modelcache_query_log" + self.ans_index = "modelcache_llm_answer" + self.create() + self.instance_id = 1 # 雪花算法使用的机器id 使用同一套数据库的分布式系统需要配置不同id + # 生成雪花id + self.snowflake_id = SnowflakeGenerator(self.instance_id) + + def create(self): + answer_index_body = { + "mappings": { + "properties": { + "gmt_create": {"type": "date", "format": "strict_date_optional_time||epoch_millis"}, + "gmt_modified": {"type": "date", "format": "strict_date_optional_time||epoch_millis"}, + "question": {"type": "text"}, + "answer": {"type": "text"}, + "answer_type": {"type": "integer"}, + "hit_count": {"type": "integer"}, + "model": {"type": "keyword"}, + "embedding_data": {"type": "binary"}, + "is_deleted": {"type": "integer"}, + } + } + } + + log_index_body = { + "mappings": { + "properties": { + "gmt_create": {"type": "date", "format": "strict_date_optional_time||epoch_millis"}, + "gmt_modified": {"type": "date", "format": "strict_date_optional_time||epoch_millis"}, + "error_code": {"type": "integer"}, + "error_desc": {"type": "text"}, + "cache_hit": {"type": "keyword"}, + "delta_time": {"type": "float"}, + "model": {"type": "keyword"}, + "query": {"type": "text"}, + "hit_query": {"type": "text"}, + "answer": {"type": "text"} + } + } + } + + if not self.client.indices.exists(index="modelcache_llm_answer"): + self.client.indices.create(index="modelcache_llm_answer", body=answer_index_body) + + if not self.client.indices.exists(index="modelcache_query_log"): + self.client.indices.create(index="modelcache_query_log", body=log_index_body) + + def _insert(self, data: List) -> str or None: + doc = { + "answer": data[0], + "question": data[1], + "embedding_data": data[2].tolist() if hasattr(data[2], "tolist") else data[2], + "model": data[3], + "answer_type": 0, + "hit_count": 0, + "is_deleted": 0 + } + + try: + + response = self.client.index( + index=self.ans_index, + id=next(self.snowflake_id), + body=doc, + ) + return int(response['_id']) + except Exception as e: + + print(f"Failed to insert document: {e}") + return None + + def batch_insert(self, all_data: List[List]) -> List[str]: + successful_ids = [] + for data in all_data: + _id = self._insert(data) + if _id is not None: + successful_ids.append(_id) + self.client.indices.refresh(index=self.ans_index) # 
批量插入后手动刷新 + + return successful_ids + + def insert_query_resp(self, query_resp, **kwargs): + doc = { + "error_code": query_resp.get('errorCode'), + "error_desc": query_resp.get('errorDesc'), + "cache_hit": query_resp.get('cacheHit'), + "model": kwargs.get('model'), + "query": kwargs.get('query'), + "delta_time": kwargs.get('delta_time'), + "hit_query": json.dumps(query_resp.get('hit_query'), ensure_ascii=False) if isinstance( + query_resp.get('hit_query'), list) else query_resp.get('hit_query'), + "answer": query_resp.get('answer'), + "hit_count": 0, + "is_deleted": 0 + + } + self.client.index(index=self.log_index, body=doc) + + def get_data_by_id(self, key: int): + try: + response = self.client.get(index=self.ans_index, id=key, _source=['question', 'answer', 'embedding_data', 'model']) + source = response["_source"] + result = [ + source.get('question'), + source.get('answer'), + source.get('embedding_data'), + source.get('model') + ] + return result + except Exception as e: + print(e) + + def update_hit_count_by_id(self, primary_id: int): + self.client.update( + index=self.ans_index, + id=primary_id, + body={"script": {"source": "ctx._source.hit_count += 1"}} + ) + + def get_ids(self, deleted=True): + query = { + "query": { + "term": {"is_deleted": 1 if deleted else 0} + } + } + response = self.client.search(index=self.ans_index, body=query) + return [hit["_id"] for hit in response["hits"]["hits"]] + + def mark_deleted(self, keys): + actions = [ + { + "_op_type": "update", + "_index": self.ans_index, + "_id": key, + "doc": {"is_deleted": 1} + } + for key in keys + ] + responses = helpers.bulk(self.client, actions) + return responses[0] # 返回更新的文档数 + + def model_deleted(self, model_name): + query = { + "query": { + "term": {"model": model_name} + } + } + + response = self.client.delete_by_query(index=self.ans_index, body=query) + return response["deleted"] + + def clear_deleted_data(self): + query = { + "query": { + "term": {"is_deleted": 1} + } + } + response = self.client.delete_by_query(index=self.ans_index, body=query) + return response["deleted"] + + def count(self, state: int = 0, is_all: bool = False): + query = {"query": {"match_all": {}}} if is_all else {"query": {"term": {"is_deleted": state}}} + response = self.client.count(index=self.ans_index, body=query) + return response["count"] + + def close(self): + self.client.close() + + def count_answers(self): + query = {"query": {"match_all": {}}} + response = self.client.count(index=self.ans_index, body=query) + return response["count"] diff --git a/requirements.txt b/requirements.txt index 95e1948..63899e9 100644 --- a/requirements.txt +++ b/requirements.txt @@ -13,4 +13,6 @@ faiss-cpu==1.7.4 redis==5.0.1 modelscope==1.14.0 fastapi==0.115.5 -uvicorn==0.32.0 \ No newline at end of file +uvicorn==0.32.0 +elasticsearch==7.10.0 +snowflake-id==1.0.2 \ No newline at end of file From 682179b50a7323753ad41d3f02f0a80362e03704 Mon Sep 17 00:00:00 2001 From: powerli2002 <845160705@qq.com> Date: Mon, 9 Dec 2024 22:43:07 +0800 Subject: [PATCH 71/98] Add feature : add timm support --- .../manager/scalar_data/sql_storage_es.py | 8 +- modelcache_mm/config/elasticsearch_config.ini | 5 + .../manager/scalar_data/sql_storage_es.py | 198 ++++++++++++++++++ 3 files changed, 207 insertions(+), 4 deletions(-) create mode 100644 modelcache_mm/config/elasticsearch_config.ini create mode 100644 modelcache_mm/manager/scalar_data/sql_storage_es.py diff --git a/modelcache/manager/scalar_data/sql_storage_es.py b/modelcache/manager/scalar_data/sql_storage_es.py 
index 68bcb31..7e0184a 100644 --- a/modelcache/manager/scalar_data/sql_storage_es.py +++ b/modelcache/manager/scalar_data/sql_storage_es.py @@ -62,11 +62,11 @@ def create(self): } } - if not self.client.indices.exists(index="modelcache_llm_answer"): - self.client.indices.create(index="modelcache_llm_answer", body=answer_index_body) + if not self.client.indices.exists(index=self.ans_index): + self.client.indices.create(index=self.ans_index, body=answer_index_body) - if not self.client.indices.exists(index="modelcache_query_log"): - self.client.indices.create(index="modelcache_query_log", body=log_index_body) + if not self.client.indices.exists(index=self.log_index): + self.client.indices.create(index=self.log_index, body=log_index_body) def _insert(self, data: List) -> str or None: doc = { diff --git a/modelcache_mm/config/elasticsearch_config.ini b/modelcache_mm/config/elasticsearch_config.ini new file mode 100644 index 0000000..ba87a5a --- /dev/null +++ b/modelcache_mm/config/elasticsearch_config.ini @@ -0,0 +1,5 @@ +[elasticsearch] +host = '' +port = '' +user = '' +password = '' \ No newline at end of file diff --git a/modelcache_mm/manager/scalar_data/sql_storage_es.py b/modelcache_mm/manager/scalar_data/sql_storage_es.py new file mode 100644 index 0000000..562b349 --- /dev/null +++ b/modelcache_mm/manager/scalar_data/sql_storage_es.py @@ -0,0 +1,198 @@ +# -*- coding: utf-8 -*- +import json +from typing import List +from elasticsearch import Elasticsearch, helpers +from modelcache.manager.scalar_data.base import CacheStorage, CacheData +import time +from snowflake import SnowflakeGenerator + + +class SQLStorage(CacheStorage): + def __init__( + self, + db_type: str = "elasticsearch", + config=None + ): + self.host = config.get('elasticsearch', 'host') + self.port = int(config.get('elasticsearch', 'port')) + self.client = Elasticsearch( + hosts=[{"host": self.host, "port": self.port}], + timeout=30, + http_auth=('esuser', 'password') + ) + + self.log_index = "open_cache_mm_query_log" + self.ans_index = "open_cache_mm_answer" + self.create() + self.instance_id = 1 # 雪花算法使用的机器id 使用同一套数据库的分布式系统需要配置不同id + # 生成雪花id + self.snowflake_id = SnowflakeGenerator(self.instance_id) + + def create(self): + answer_index_body = { + "mappings": { + "properties": { + "gmt_create": {"type": "date", "format": "strict_date_optional_time||epoch_millis"}, + "gmt_modified": {"type": "date", "format": "strict_date_optional_time||epoch_millis"}, + "question": {"type": "text"}, + "answer": {"type": "text"}, + "answer_type": {"type": "integer"}, + "hit_count": {"type": "integer"}, + "model": {"type": "keyword"}, + "image_url": {"type": "text"}, + "image_id": {"type": "text"}, + "is_deleted": {"type": "integer"}, + } + } + } + + log_index_body = { + "mappings": { + "properties": { + "gmt_create": {"type": "date", "format": "strict_date_optional_time||epoch_millis"}, + "gmt_modified": {"type": "date", "format": "strict_date_optional_time||epoch_millis"}, + "error_code": {"type": "integer"}, + "error_desc": {"type": "text"}, + "cache_hit": {"type": "keyword"}, + "delta_time": {"type": "float"}, + "model": {"type": "keyword"}, + "query": {"type": "text"}, + "hit_query": {"type": "text"}, + "answer": {"type": "text"} + } + } + } + + if not self.client.indices.exists(index=self.ans_index): + self.client.indices.create(index=self.ans_index, body=answer_index_body) + + if not self.client.indices.exists(index=self.log_index): + self.client.indices.create(index=self.log_index, body=log_index_body) + + def _insert(self, data: 
List) -> str or None: + doc = { + "answer": data[0], + "question": data[1], + "image_url": data[2], + "image_id": data[3], + "model": data[4], + "answer_type": 0, + "hit_count": 0, + "is_deleted": 0 + } + + try: + + response = self.client.index( + index=self.ans_index, + id=next(self.snowflake_id), + body=doc, + ) + return int(response['_id']) + except Exception as e: + + print(f"Failed to insert document: {e}") + return None + + def batch_insert(self, all_data: List[List]) -> List[str]: + successful_ids = [] + for data in all_data: + _id = self._insert(data) + if _id is not None: + successful_ids.append(_id) + self.client.indices.refresh(index=self.ans_index) # 批量插入后手动刷新 + + return successful_ids + + def insert_query_resp(self, query_resp, **kwargs): + doc = { + "error_code": query_resp.get('errorCode'), + "error_desc": query_resp.get('errorDesc'), + "cache_hit": query_resp.get('cacheHit'), + "model": kwargs.get('model'), + "query": kwargs.get('query'), + "delta_time": kwargs.get('delta_time'), + "hit_query": json.dumps(query_resp.get('hit_query'), ensure_ascii=False) if isinstance( + query_resp.get('hit_query'), list) else query_resp.get('hit_query'), + "answer": query_resp.get('answer'), + "hit_count": 0, + "is_deleted": 0 + + } + self.client.index(index=self.log_index, body=doc) + + def get_data_by_id(self, key: int): + try: + response = self.client.get(index=self.ans_index, id=key, _source=['question', 'image_url','image_id', 'answer', 'model']) + source = response["_source"] + result = [ + source.get('question'), + source.get('image_url'), + source.get('image_id'), + source.get('answer'), + source.get('model') + ] + return result + except Exception as e: + print(e) + + def update_hit_count_by_id(self, primary_id: int): + self.client.update( + index=self.ans_index, + id=primary_id, + body={"script": {"source": "ctx._source.hit_count += 1"}} + ) + + def get_ids(self, deleted=True): + query = { + "query": { + "term": {"is_deleted": 1 if deleted else 0} + } + } + response = self.client.search(index=self.ans_index, body=query) + return [hit["_id"] for hit in response["hits"]["hits"]] + + def mark_deleted(self, keys): + actions = [ + { + "_op_type": "update", + "_index": self.ans_index, + "_id": key, + "doc": {"is_deleted": 1} + } + for key in keys + ] + responses = helpers.bulk(self.client, actions) + return responses[0] # 返回更新的文档数 + + def model_deleted(self, model_name): + query = { + "query": { + "term": {"model": model_name} + } + } + + response = self.client.delete_by_query(index=self.ans_index, body=query) + return response["deleted"] + + def clear_deleted_data(self): + query = { + "query": { + "term": {"is_deleted": 1} + } + } + response = self.client.delete_by_query(index=self.ans_index, body=query) + return response["deleted"] + + def count(self, state: int = 0, is_all: bool = False): + query = {"query": {"match_all": {}}} if is_all else {"query": {"term": {"is_deleted": state}}} + response = self.client.count(index=self.ans_index, body=query) + return response["count"] + + def close(self): + self.client.close() + + def count_answers(self): + query = {"query": {"match_all": {}}} + response = self.client.count(index=self.ans_index, body=query) + return response["count"] From aead2c1d67f62a2ec09d8c16446556abc6431563 Mon Sep 17 00:00:00 2001 From: powerli2002 <845160705@qq.com> Date: Sat, 14 Dec 2024 16:35:29 +0800 Subject: [PATCH 72/98] Add feature : add chromadb support as a vector database --- flask4modelcache.py | 6 ++ modelcache/config/chromadb_config.ini | 2 + 
modelcache/manager/vector_data/chroma.py | 93 +++++++++++++++++ modelcache/manager/vector_data/manager.py | 8 +- modelcache/utils/__init__.py | 4 + modelcache_mm/config/chromadb_config.ini | 2 + modelcache_mm/manager/vector_data/chroma.py | 100 +++++++++++++++++++ modelcache_mm/manager/vector_data/manager.py | 9 ++ modelcache_mm/utils/__init__.py | 4 + requirements.txt | 3 +- 10 files changed, 225 insertions(+), 6 deletions(-) create mode 100644 modelcache/config/chromadb_config.ini create mode 100644 modelcache/manager/vector_data/chroma.py create mode 100644 modelcache_mm/config/chromadb_config.ini create mode 100644 modelcache_mm/manager/vector_data/chroma.py diff --git a/flask4modelcache.py b/flask4modelcache.py index ee5955b..4dc85a9 100644 --- a/flask4modelcache.py +++ b/flask4modelcache.py @@ -41,10 +41,16 @@ def response_hitquery(cache_resp): # redis_config = configparser.ConfigParser() # redis_config.read('modelcache/config/redis_config.ini') +# chromadb_config = configparser.ConfigParser() +# chromadb_config.read('modelcache/config/chromadb_config.ini') data_manager = get_data_manager(CacheBase("mysql", config=mysql_config), VectorBase("milvus", dimension=data2vec.dimension, milvus_config=milvus_config)) + +# data_manager = get_data_manager(CacheBase("mysql", config=mysql_config), +# VectorBase("chromadb", dimension=data2vec.dimension, chromadb_config=chromadb_config)) + # data_manager = get_data_manager(CacheBase("mysql", config=mysql_config), # VectorBase("redis", dimension=data2vec.dimension, redis_config=redis_config)) diff --git a/modelcache/config/chromadb_config.ini b/modelcache/config/chromadb_config.ini new file mode 100644 index 0000000..7400f80 --- /dev/null +++ b/modelcache/config/chromadb_config.ini @@ -0,0 +1,2 @@ +[chromadb] +persist_directory='' diff --git a/modelcache/manager/vector_data/chroma.py b/modelcache/manager/vector_data/chroma.py new file mode 100644 index 0000000..5bcb419 --- /dev/null +++ b/modelcache/manager/vector_data/chroma.py @@ -0,0 +1,93 @@ +from typing import List + +import numpy as np +import logging +from modelcache.manager.vector_data.base import VectorBase, VectorData +from modelcache.utils import import_chromadb, import_torch + +import_torch() +import_chromadb() + +import chromadb + + +class Chromadb(VectorBase): + + def __init__( + self, + persist_directory="./chromadb", + top_k: int = 1, + ): + self.collection_name = "modelcache" + self.top_k = top_k + + self._client = chromadb.PersistentClient(path=persist_directory) + self._collection = None + + def mul_add(self, datas: List[VectorData], model=None): + collection_name_model = self.collection_name + '_' + model + self._collection = self._client.get_or_create_collection(name=collection_name_model) + + data_array, id_array = map(list, zip(*((data.data.tolist(), str(data.id)) for data in datas))) + self._collection.add(embeddings=data_array, ids=id_array) + + def search(self, data: np.ndarray, top_k: int = -1, model=None): + collection_name_model = self.collection_name + '_' + model + self._collection = self._client.get_or_create_collection(name=collection_name_model) + + if self._collection.count() == 0: + return [] + if top_k == -1: + top_k = self.top_k + results = self._collection.query( + query_embeddings=[data.tolist()], + n_results=top_k, + include=["distances"], + ) + return list(zip(results["distances"][0], [int(x) for x in results["ids"][0]])) + + def rebuild(self, ids=None): + pass + + def delete(self, ids, model=None): + try: + collection_name_model = self.collection_name + '_' 
+ model + self._collection = self._client.get_or_create_collection(name=collection_name_model) + # 查询集合中实际存在的 ID + ids_str = [str(x) for x in ids] + existing_ids = set(self._collection.get(ids=ids_str).ids) + + # 删除存在的 ID + if existing_ids: + self._collection.delete(list(existing_ids)) + + # 返回实际删除的条目数量 + return len(existing_ids) + + except Exception as e: + logging.error('Error during deletion: {}'.format(e)) + raise ValueError(str(e)) + + def rebuild_col(self, model): + collection_name_model = self.collection_name + '_' + model + + # 检查集合是否存在,如果存在则删除 + collections = self._client.list_collections() + if any(col.name == collection_name_model for col in collections): + self._client.delete_collection(collection_name_model) + else: + return 'model collection not found, please check!' + + try: + self._client.create_collection(collection_name_model) + except Exception as e: + logging.info(f'rebuild_collection: {e}') + raise ValueError(str(e)) + + def flush(self): + # chroma无flush方法 + pass + + def close(self): + # chroma无flush方法 + pass diff --git a/modelcache/manager/vector_data/manager.py b/modelcache/manager/vector_data/manager.py index 70448b2..0fc5334 100644 --- a/modelcache/manager/vector_data/manager.py +++ b/modelcache/manager/vector_data/manager.py @@ -102,13 +102,11 @@ def get(name, **kwargs): elif name == "chromadb": from modelcache.manager.vector_data.chroma import Chromadb - client_settings = kwargs.get("client_settings", None) - persist_directory = kwargs.get("persist_directory", None) - collection_name = kwargs.get("collection_name", COLLECTION_NAME) + chromadb_config = kwargs.get("chromadb_config", None) + persist_directory = chromadb_config.get('chromadb','persist_directory') + vector_base = Chromadb( - client_settings=client_settings, persist_directory=persist_directory, - collection_name=collection_name, top_k=top_k, ) elif name == "hnswlib": diff --git a/modelcache/utils/__init__.py b/modelcache/utils/__init__.py index 147a56e..15d90ec 100644 --- a/modelcache/utils/__init__.py +++ b/modelcache/utils/__init__.py @@ -73,3 +73,7 @@ def import_pillow(): def import_redis(): _check_library("redis") + + +def import_chromadb(): + _check_library("chromadb", package="chromadb") \ No newline at end of file diff --git a/modelcache_mm/config/chromadb_config.ini b/modelcache_mm/config/chromadb_config.ini new file mode 100644 index 0000000..2a87f3b --- /dev/null +++ b/modelcache_mm/config/chromadb_config.ini @@ -0,0 +1,2 @@ +[chromadb] +persist_directory=./chromadb diff --git a/modelcache_mm/manager/vector_data/chroma.py b/modelcache_mm/manager/vector_data/chroma.py new file mode 100644 index 0000000..fb97e32 --- /dev/null +++ b/modelcache_mm/manager/vector_data/chroma.py @@ -0,0 +1,100 @@ +from typing import List + +import numpy as np +import logging +from modelcache_mm.manager.vector_data.base import VectorBase, VectorData +from modelcache_mm.utils import import_chromadb, import_torch +from modelcache_mm.utils.index_util import get_mm_index_name + +import_torch() +import_chromadb() + +import chromadb + + +class Chromadb(VectorBase): + + def __init__( + self, + persist_directory="./chromadb", + top_k: int = 1, + ): + # self.collection_name = "modelcache" + self.top_k = top_k + + self._client = chromadb.PersistentClient(path=persist_directory) + self._collection = None + + def create(self, model=None, mm_type=None): + try: + collection_name_model = get_mm_index_name(model, mm_type) + # collection_name_model = self.collection_name + '_' + model + 
self._client.get_or_create_collection(name=collection_name_model) + except Exception as e: + raise ValueError(str(e)) + + def add(self, datas: List[VectorData], model=None, mm_type=None): + collection_name_model = get_mm_index_name(model, mm_type) + self._collection = self._client.get_or_create_collection(name=collection_name_model) + + data_array, id_array = map(list, zip(*((data.data.tolist(), str(data.id)) for data in datas))) + self._collection.add(embeddings=data_array, ids=id_array) + + def search(self, data: np.ndarray, top_k: int = -1, model=None, mm_type='mm'): + collection_name_model = get_mm_index_name(model, mm_type) + self._collection = self._client.get_or_create_collection(name=collection_name_model) + + if self._collection.count() == 0: + return [] + if top_k == -1: + top_k = self.top_k + results = self._collection.query( + query_embeddings=[data.tolist()], + n_results=top_k, + include=["distances"], + ) + return list(zip(results["distances"][0], [int(x) for x in results["ids"][0]])) + + def delete(self, ids, model=None, mm_type=None): + try: + collection_name_model = get_mm_index_name(model, mm_type) + self._collection = self._client.get_or_create_collection(name=collection_name_model) + # 查询集合中实际存在的 ID + ids_str = [str(x) for x in ids] + existing_ids = set(self._collection.get(ids=ids_str).ids) + + # 删除存在的 ID + if existing_ids: + self._collection.delete(list(existing_ids)) + + # 返回实际删除的条目数量 + return len(existing_ids) + + except Exception as e: + logging.error('Error during deletion: {}'.format(e)) + raise ValueError(str(e)) + + def rebuild_idx(self, model, mm_type=None): + collection_name_model = get_mm_index_name(model, mm_type) + + # 检查集合是否存在,如果存在则删除 + collections = self._client.list_collections() + if any(col.name == collection_name_model for col in collections): + self._client.delete_collection(collection_name_model) + else: + return 'model collection not found, please check!' 
+ + try: + self._client.create_collection(collection_name_model) + except Exception as e: + logging.info(f'rebuild_collection: {e}') + raise ValueError(str(e)) + + def rebuild(self, ids=None): + pass + + def flush(self): + pass + + def close(self): + pass diff --git a/modelcache_mm/manager/vector_data/manager.py b/modelcache_mm/manager/vector_data/manager.py index c10decc..64cca45 100644 --- a/modelcache_mm/manager/vector_data/manager.py +++ b/modelcache_mm/manager/vector_data/manager.py @@ -108,6 +108,15 @@ def get(name, **kwargs): dimension=dimension, top_k=top_k ) + elif name == "chromadb": + from modelcache_mm.manager.vector_data.chroma import Chromadb + + chromadb_config = kwargs.get("chromadb_config", None) + persist_directory = chromadb_config.get('chromadb', 'persist_directory') + vector_base = Chromadb( + persist_directory=persist_directory, + top_k=top_k, + ) else: raise NotFoundError("vector store", name) return vector_base diff --git a/modelcache_mm/utils/__init__.py b/modelcache_mm/utils/__init__.py index 147a56e..15d90ec 100644 --- a/modelcache_mm/utils/__init__.py +++ b/modelcache_mm/utils/__init__.py @@ -73,3 +73,7 @@ def import_pillow(): def import_redis(): _check_library("redis") + + +def import_chromadb(): + _check_library("chromadb", package="chromadb") \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 95e1948..77b3868 100644 --- a/requirements.txt +++ b/requirements.txt @@ -13,4 +13,5 @@ faiss-cpu==1.7.4 redis==5.0.1 modelscope==1.14.0 fastapi==0.115.5 -uvicorn==0.32.0 \ No newline at end of file +uvicorn==0.32.0 +chromadb==0.5.23 From f3c7657858f18ec2e199a41208b2d6648fd311a2 Mon Sep 17 00:00:00 2001 From: powerli2002 <845160705@qq.com> Date: Sat, 14 Dec 2024 16:37:34 +0800 Subject: [PATCH 73/98] Add feature : add chromadb support as a vector database --- modelcache/manager/vector_data/chroma.py | 1 - modelcache_mm/manager/vector_data/chroma.py | 1 - 2 files changed, 2 deletions(-) diff --git a/modelcache/manager/vector_data/chroma.py b/modelcache/manager/vector_data/chroma.py index 5bcb419..446b354 100644 --- a/modelcache/manager/vector_data/chroma.py +++ b/modelcache/manager/vector_data/chroma.py @@ -89,5 +89,4 @@ def flush(self): pass def close(self): - # chroma无flush方法 pass diff --git a/modelcache_mm/manager/vector_data/chroma.py b/modelcache_mm/manager/vector_data/chroma.py index fb97e32..7f5b78c 100644 --- a/modelcache_mm/manager/vector_data/chroma.py +++ b/modelcache_mm/manager/vector_data/chroma.py @@ -19,7 +19,6 @@ def __init__( persist_directory="./chromadb", top_k: int = 1, ): - # self.collection_name = "modelcache" self.top_k = top_k self._client = chromadb.PersistentClient(path=persist_directory) From 2556eb6dda5d235603b0f6a3b16f47e68c6eb104 Mon Sep 17 00:00:00 2001 From: "Amber.Z" Date: Wed, 18 Dec 2024 17:38:46 +0800 Subject: [PATCH 74/98] add docs --- README.md | 186 +++++++++++++++++++----------- README_CN.md | 51 +++++++- docs/1.what-is-model-cache.md | 0 docs/2.model-cache-features.md | 0 docs/3.model-cache-quick-start.md | 0 docs/4.create-cache.md | 0 docs/5.configure-cache.md | 0 7 files changed, 169 insertions(+), 68 deletions(-) create mode 100644 docs/1.what-is-model-cache.md create mode 100644 docs/2.model-cache-features.md create mode 100644 docs/3.model-cache-quick-start.md create mode 100644 docs/4.create-cache.md create mode 100644 docs/5.configure-cache.md diff --git a/README.md b/README.md index 6ebc5f7..8e9489c 100644 --- a/README.md +++ b/README.md @@ -9,62 +9,102 @@ ModelCache ## Contents -- 
[news](#news) -- [Introduction](#Introduction) -- [Quick-Deployment](#Quick-Deployment) -- [Service-Access](#Service-Access) -- [Articles](#Articles) -- [Modules](#Modules) -- [Core-Features](#Core-Features) -- [Acknowledgements](#Acknowledgements) -- [Contributing](#Contributing) -## news -- 🔥🔥[2024.04.09] Add Redis Search to store and retrieve embeddings in multi-tenant scene, this can reduce the interaction time between Cache and vector databases to 10ms. -- 🔥🔥[2023.12.10] we integrate LLM embedding frameworks such as 'llmEmb', 'ONNX', 'PaddleNLP', 'FastText', alone with the image embedding framework 'timm', to bolster embedding functionality. -- 🔥🔥[2023.11.20] codefuse-ModelCache has integrated local storage, such as sqlite and faiss, providing users with the convenience of quickly initiating tests. + +- [Contents](#contents) +- [News](#news) + - [Introduction](#introduction) +- [Architecture](#architecture) +- [Quick start](#quick-start) + - [Dependencies](#dependencies) + - [Start service](#start-service) + - [Start Demo](#start-demo) + - [Start normal service](#start-normal-service) +- [Access the service](#access-the-service) + - [Write cache](#write-cache) + - [Query cache](#query-cache) + - [Clear cache](#clear-cache) +- [Function comparison](#function-comparison) +- [Core-Features](#core-features) +- [Todo List](#todo-list) + - [Adapter](#adapter) + - [Embedding model\&inference](#embedding-modelinference) + - [Scalar Storage](#scalar-storage) + - [Vector Storage](#vector-storage) + - [Ranking](#ranking) + - [Service](#service) +- [Acknowledgements](#acknowledgements) +- [Contributing](#contributing) + +## News + +- 🔥🔥[2024.04.09] Added Redis Search to store and retrieve embeddings in multi-tenant. This can reduce the interaction time between Cache and vector databases to 10ms. +- 🔥🔥[2023.12.10] Integrated LLM embedding frameworks such as 'llmEmb', 'ONNX', 'PaddleNLP', 'FastText', and the image embedding framework 'timm' to bolster embedding functionality. +- 🔥🔥[2023.11.20] Integrated local storage, such as sqlite and faiss. This enables you to initiate quick and convenient tests. - [2023.08.26] codefuse-ModelCache... + ### Introduction + Codefuse-ModelCache is a semantic cache for large language models (LLMs). By caching pre-generated model results, it reduces response time for similar requests and improves user experience.
    This project aims to optimize services by introducing a caching mechanism. It helps businesses and research institutions reduce the cost of inference deployment, improve model performance and efficiency, and provide scalable services for large models. Through open-source, we aim to share and exchange technologies related to large model semantic cache. -## Quick Deployment -The project's startup scripts are divided into flask4modelcache.py and flask4modelcache_demo.py. -- flask4modelcache_demo.py is a quick test service that embeds sqlite and faiss, and users do not need to be concerned about database-related matters. -- flask4modelcache.py is the normal service that requires configuration of mysql and milvus database services. + +## Architecture + +![modelcache modules](docs/modelcache_modules_20240409.png) + +## Quick start + +You can find the start script in `flask4modelcache.py` and `flask4modelcache_demo.py`. + +- `flask4modelcache_demo.py` is a quick test service that embeds sqlite and faiss. You do not need to be concerned about database-related matters. +- `flask4modelcache.py` is the normal service that requires configuration of MySQL and Milvus. + ### Dependencies -- Python version: 3.8 and above -- Package Installation -```shell -pip install -r requirements.txt -``` -### Service Startup -#### Demo Service Startup -1. Download the embedding model bin file from the following address: [https://huggingface.co/shibing624/text2vec-base-chinese/tree/main](https://huggingface.co/shibing624/text2vec-base-chinese/tree/main). Place the downloaded bin file in the model/text2vec-base-chinese folder. -2. Start the backend service using the flask4modelcache_dome.py script. -```shell -cd CodeFuse-ModelCache -``` -```shell -python flask4modelcache_demo.py -``` +- Python: V3.8 and above +- Package installation + + ```shell + pip install -r requirements.txt + ``` + +### Start service + +#### Start Demo + +1. Download the embedding model bin file on [Hugging Face](https://huggingface.co/shibing624/text2vec-base-chinese/tree/main). Place the downloaded bin file in the model/text2vec-base-chinese folder. +2. Start the backend service by using `flask4modelcache_dome.py`. + + ```shell + cd CodeFuse-ModelCache + ``` + + ```shell + python flask4modelcache_demo.py + ``` + +#### Start normal service + +Before you start normal service, make sure that you have completed these steps: + +1. Install the relational database MySQL and import the SQL file to create the data tables. You can find the SQL file in `reference_doc/create_table.sql`. +2. Install vector database Milvus. +3. Add the database access information to the configuration files: + 1. `modelcache/config/milvus_config.ini` + 2. `modelcache/config/mysql_config.ini` +4. Download the embedding model bin file from [Hugging Face](https://huggingface.co/shibing624/text2vec-base-chinese/tree/main). Put the bin file in the `model/text2vec-base-chinese` directory. +5. Start the backend service by using the `flask4modelcache.py` script. + +## Access the service -#### Normal Service Startup -Before starting the service, the following environment configurations should be performed: -1. Install the relational database MySQL and import the SQL file to create the data tables. The SQL file can be found at: ```reference_doc/create_table.sql``` -2. Install the vector database Milvus. -3. Add the database access information to the configuration files: - 1. ```modelcache/config/milvus_config.ini ``` - 2. ```modelcache/config/mysql_config.ini``` -4. 
Download the embedding model bin file from the following address: [https://huggingface.co/shibing624/text2vec-base-chinese/tree/main](https://huggingface.co/shibing624/text2vec-base-chinese/tree/main). Place the downloaded bin file in the model/text2vec-base-chinese folder. -5. Start the backend service using the flask4modelcache.py script. -## Service-Access The current service provides three core functionalities through RESTful API.: Cache-Writing, Cache-Querying, and Cache-Clearing. Demos: -### Cache-Writing + +### Write cache + ```python import json import requests @@ -77,7 +117,9 @@ data = {'type': type, 'scope': scope, 'chat_info': chat_info} headers = {"Content-Type": "application/json"} res = requests.post(url, headers=headers, json=json.dumps(data)) ``` -### Cache-Querying + +### Query cache + ```python import json import requests @@ -90,7 +132,9 @@ data = {'type': type, 'scope': scope, 'query': query} headers = {"Content-Type": "application/json"} res = requests.post(url, headers=headers, json=json.dumps(data)) ``` -### Cache-Clearing + +### Clear cache + ```python import json import requests @@ -103,12 +147,10 @@ data = {'type': type, 'scope': scope, 'remove_type': remove_type} headers = {"Content-Type": "application/json"} res = requests.post(url, headers=headers, json=json.dumps(data)) ``` -## Articles -https://mp.weixin.qq.com/s/ExIRu2o7yvXa6nNLZcCfhQ -## modules -![modelcache modules](docs/modelcache_modules_20240409.png) -## Function-Comparison -In terms of functionality, we have made several changes to the git repository. Firstly, we have addressed the network issues with huggingface and enhanced the inference speed by introducing local inference capabilities for embeddings. Additionally, considering the limitations of the SqlAlchemy framework, we have completely revamped the module responsible for interacting with relational databases, enabling more flexible database operations. In practical scenarios, LLM products often require integration with multiple users and multiple models. Hence, we have added support for multi-tenancy in the ModelCache, while also making preliminary compatibility adjustments for system commands and multi-turn dialogue. + +## Function comparison + +We've implemented several key updates to our repository. We've resolved network issues with Hugging Face and improved inference speed by introducing local embedding capabilities. Due to limitations in SqlAlchemy, we've redesigned our relational database interaction module for more flexible operations. We've added multi-tenancy support to ModelCache, recognizing the need for multiple users and models in LLM products. Lastly, we've made initial adjustments for better compatibility with system commands and multi-turn dialogues. @@ -231,45 +273,59 @@ In terms of functionality, we have made several changes to the git repository. F
    - ## Core-Features + In ModelCache, we adopted the main idea of GPTCache, includes core modules: adapter, embedding, similarity, and data_manager. The adapter module is responsible for handling the business logic of various tasks and can connect the embedding, similarity, and data_manager modules. The embedding module is mainly responsible for converting text into semantic vector representations, it transforms user queries into vector form.The rank module is used for sorting and evaluating the similarity of the recalled vectors. The data_manager module is primarily used for managing the database. In order to better facilitate industrial applications, we have made architectural and functional upgrades as follows: - [x] We have modified it similar to Redis and embedded it into the LLMs product, providing semantic caching capabilities. This ensures that it does not interfere with LLM calls, security audits, and other functionalities, achieving compatibility with all large-scale model services. -- [x] Multiple Model Loading Schemes: - - Support loading local embedding models to address Hugging Face network connectivity issues. - - Support loading various pretrained model embedding layers. -- [x] Data Isolation Capability - - Environment Isolation: Can pull different database configurations based on the environment to achieve environment isolation (dev, prepub, prod). - - Multi-tenant Data Isolation: Dynamically create collections based on the model for data isolation, addressing data isolation issues in multi-model/services scenarios in LLMs products. +- [x] Multiple Model Loading Schemes: + - Support loading local embedding models to address Hugging Face network connectivity issues. + - Support loading various pretrained model embedding layers. +- [x] Data Isolation Capability + - Environment Isolation: Can pull different database configurations based on the environment to achieve environment isolation (dev, prepub, prod). + - Multi-tenant Data Isolation: Dynamically create collections based on the model for data isolation, addressing data isolation issues in multi-model/services scenarios in LLMs products. - [x] Support for System Commands: Adopting a concatenation approach to address the issue of system commands in the prompt format. - [x] Differentiation of Long and Short Texts: Long texts pose more challenges for similarity evaluation. To address this, we have added differentiation between long and short texts, allowing for separate configuration of threshold values for determining similarity. - [x] Milvus Performance Optimization: The consistency_level of Milvus has been adjusted to "Session" level, which can result in better performance. -- [x] Data Management Capability: - - Ability to clear the cache, used for data management after model upgrades. - - Hitquery recall for subsequent data analysis and model iteration reference. - - Asynchronous log write-back capability for data analysis and statistics. - - Added model field and data statistics field for feature expansion. +- [x] Data Management Capability: + - Ability to clear the cache, used for data management after model upgrades. + - Hitquery recall for subsequent data analysis and model iteration reference. + - Asynchronous log write-back capability for data analysis and statistics. + - Added model field and data statistics field for feature expansion. 
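The long and short text differentiation described above comes down to choosing a different similarity threshold depending on the length of the pre-processed query. Below is a minimal, illustrative sketch of that decision in plain Python. It mirrors the threshold logic that appears in `modelcache/adapter/adapter_query.py` later in this series (the 256-character cutoff comes from that adapter code; the threshold values here are placeholder assumptions, not fixed defaults):

```python
from typing import List, Tuple

# Illustrative values only -- in the service these come from the cache configuration.
SHORT_TEXT_LIMIT = 256      # queries at or below this length use the short-text threshold
RANK_THRESHOLD = 0.9        # similarity threshold for short texts
RANK_THRESHOLD_LONG = 0.8   # separate (typically looser) threshold for long texts


def is_cache_hit(query_text: str, rank: float) -> bool:
    """Decide whether a recalled candidate is similar enough, using a length-dependent threshold."""
    threshold = RANK_THRESHOLD if len(query_text) <= SHORT_TEXT_LIMIT else RANK_THRESHOLD_LONG
    return rank >= threshold


def filter_hits(query_text: str, candidates: List[Tuple[float, str]]) -> List[Tuple[float, str]]:
    """Keep only (rank, answer) pairs that pass the threshold, best match first."""
    hits = [(rank, answer) for rank, answer in candidates if is_cache_hit(query_text, rank)]
    return sorted(hits, key=lambda pair: pair[0], reverse=True)


if __name__ == "__main__":
    candidates = [(0.95, "cached answer A"), (0.82, "cached answer B")]
    print(filter_hits("How do I clear the cache?", candidates))
```

In the service itself, these thresholds are applied to the rank produced by similarity evaluation (or by the optional reranker), so long prompts can be held to a different bar than short ones.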
## Todo List + ### Adapter + - [ ] Register adapter for Milvus:Based on the "model" parameter in the scope, initialize the corresponding Collection and perform the load operation. + ### Embedding model&inference + - [ ] Inference Optimization: Optimizing the speed of embedding inference, compatible with inference engines such as FasterTransformer, TurboTransformers, and ByteTransformer. - [ ] Compatibility with Hugging Face models and ModelScope models, offering more methods for model loading. + ### Scalar Storage + - [ ] Support MongoDB - [ ] Support ElasticSearch + ### Vector Storage + - [ ] Adapts Faiss storage in multimodal scenarios. + ### Ranking + - [ ] Add ranking model to refine the order of data after embedding recall. + ### Service + - [ ] Supports FastAPI. - [ ] Add visual interface to offer a more direct user experience. ## Acknowledgements + This project has referenced the following open-source projects. We would like to express our gratitude to the projects and their developers for their contributions and research.
    [GPTCache](https://github.com/zilliztech/GPTCache) ## Contributing -ModelCache is a captivating and invaluable project, whether you are an experienced developer or a novice just starting out, your contributions to this project are warmly welcomed. Your involvement in this project, be it through raising issues, providing suggestions, writing code, or documenting and creating examples, will enhance the project's quality and make a significant contribution to the open-source community. \ No newline at end of file + +ModelCache is a captivating and invaluable project, whether you are an experienced developer or a novice just starting out, your contributions to this project are warmly welcomed. Your involvement in this project, be it through raising issues, providing suggestions, writing code, or documenting and creating examples, will enhance the project's quality and make a significant contribution to the open-source community. diff --git a/README_CN.md b/README_CN.md index 5959380..89c3fad 100644 --- a/README_CN.md +++ b/README_CN.md @@ -15,6 +15,7 @@ ModelCache ## Contents + - [新闻](#新闻) - [项目简介](#项目简介) - [快速部署](#快速部署) @@ -24,29 +25,42 @@ ModelCache - [核心功能](#核心功能) - [致谢](#致谢) - [Contributing](#Contributing) + ## 新闻 + - 🔥🔥[2024.10.22] 增加1024程序员节任务。 - 🔥🔥[2024.04.09] 增加了多租户场景中Redis Search存储和检索embedding的能力,可以将Cache和向量数据库的交互耗时降低至10ms内。 - 🔥🔥[2023.12.10] 增加llmEmb、onnx、paddlenlp、fasttext等LLM embedding框架,并增加timm 图片embedding框架,用于提供更丰富的embedding能力。 - 🔥🔥[2023.11.20] codefuse-ModelCache增加本地存储能力, 适配了嵌入式数据库sqlite、faiss,方便用户快速启动测试。 - [2023.10.31] codefuse-ModelCache... + ## 项目简介 + Codefuse-ModelCache 是一个开源的大模型语义缓存系统,通过缓存已生成的模型结果,降低类似请求的响应时间,提升用户体验。该项目从服务优化角度出发,引入缓存机制,在资源有限和对实时性要求较高的场景下,帮助企业和研究机构降低推理部署成本、提升模型性能和效率、提供规模化大模型服务。我们希望通过开源,分享交流大模型语义Cache的相关技术。 + ## 快速部署 + 项目中启动服务脚本分为flask4modelcache.py 和 flask4modelcache_demo.py,其中: - flask4modelcache_demo.py 为快速测试服务,内嵌了sqlite和faiss,用户无需关心数据库相关事宜。 - flask4modelcache.py 为正常服务,需用户具备mysql和milvus等数据库服务。 + ### 环境依赖 + - python版本: 3.8及以上 - 依赖包安装: -```shell -pip install -r requirements.txt -``` + + ```shell + pip install -r requirements.txt + ``` + ### 服务启动 + #### Demo服务启动 + - 离线模型bin文件下载, 参考地址:[https://huggingface.co/shibing624/text2vec-base-chinese/tree/main](https://huggingface.co/shibing624/text2vec-base-chinese/tree/main),并将下载的bin文件,放到 model/text2vec-base-chinese 文件夹中。 - 执行flask4modelcache_demo.py启动服务。 + ```shell cd CodeFuse-ModelCache ``` @@ -55,7 +69,9 @@ python flask4modelcache_demo.py ``` #### 正常服务启动 + 在启动服务前,应该进行如下环境配置: + 1. 安装关系数据库 mysql, 导入sql创建数据表,sql文件:```reference_doc/create_table.sql``` 2. 安装向量数据库milvus 3. 在配置文件中添加数据库访问信息,配置文件为: @@ -63,9 +79,13 @@ python flask4modelcache_demo.py 2. ```modelcache/config/mysql_config.ini``` 4. 离线模型bin文件下载, 参考地址:[https://huggingface.co/shibing624/text2vec-base-chinese/tree/main](https://huggingface.co/shibing624/text2vec-base-chinese/tree/main),并将下载的bin文件,放到 model/text2vec-base-chinese 文件夹中 5. 
通过flask4modelcache.py脚本启动后端服务。 + ## 服务访问 + 当前服务以restful API方式提供3个核心功能:数据写入,cache查询和cache数据清空。请求demo 如下: + ### cache写入 + ```python import json import requests @@ -78,7 +98,9 @@ data = {'type': type, 'scope': scope, 'chat_info': chat_info} headers = {"Content-Type": "application/json"} res = requests.post(url, headers=headers, json=json.dumps(data)) ``` + ### cache查询 + ```python import json import requests @@ -91,7 +113,9 @@ data = {'type': type, 'scope': scope, 'query': query} headers = {"Content-Type": "application/json"} res = requests.post(url, headers=headers, json=json.dumps(data)) ``` + ### cache清空 + ```python import json import requests @@ -104,11 +128,17 @@ data = {'type': type, 'scope': scope, 'remove_type': remove_type} headers = {"Content-Type": "application/json"} res = requests.post(url, headers=headers, json=json.dumps(data)) ``` + ## 文章 + https://mp.weixin.qq.com/s/ExIRu2o7yvXa6nNLZcCfhQ + ## 架构大图 + ![modelcache modules](docs/modelcache_modules_20240409.png) + ## 功能对比 + 功能方面,为了解决huggingface网络问题并提升推理速度,增加了embedding本地推理能力。鉴于SqlAlchemy框架存在一些限制,我们对关系数据库交互模块进行了重写,以更灵活地实现数据库操作。在实践中,大型模型产品需要与多个用户和多个模型对接,因此在ModelCache中增加了对多租户的支持,同时也初步兼容了系统指令和多轮会话。 @@ -233,6 +263,7 @@ https://mp.weixin.qq.com/s/ExIRu2o7yvXa6nNLZcCfhQ
    ## 核心功能 + 在ModelCache中,沿用了GPTCache的主要思想,包含了一系列核心模块:adapter、embedding、similarity和data_manager。adapter模块主要功能是处理各种任务的业务逻辑,并且能够将embedding、similarity、data_manager等模块串联起来;embedding模块主要负责将文本转换为语义向量表示,它将用户的查询转换为向量形式,并用于后续的召回或存储操作;rank模块用于对召回的向量进行相似度排序和评估;data_manager模块主要用于管理数据库。同时,为了更好的在工业界落地,我们做了架构和功能上的升级,如下: - [x] 架构调整(轻量化集成):以类redis的缓存模式嵌入到大模型产品中,提供语义缓存能力,不会干扰LLM调用和安全审核等功能,适配所有大模型服务。 @@ -252,24 +283,38 @@ https://mp.weixin.qq.com/s/ExIRu2o7yvXa6nNLZcCfhQ - 增加model字段和数据统计字段,用于功能拓展。 ## Todo List + ### Adapter + - [ ] register adapter for Milvus:根据scope中的model参数,初始化对应Collection 并且执行load操作。 + ### Embedding model&inference + - [ ] inference优化:优化embedding推理速度,适配fastertransformer, TurboTransformers, ByteTransformer等推理引擎。 - [ ] 兼容huggingface模型和modelscope模型,提供更多模型加载方式。 + ### Scalar Storage + - [ ] Support MongoDB。 - [ ] Support ElasticSearch。 + ### Vector Storage + - [ ] 在多模态场景中适配faiss存储。 + ### Ranking + - [ ] 增加Rank模型,对embedding召回后的数据,进行精排。 + ### Service + - [ ] 支持fastapi。 - [ ] 增加前端界面,用于测试。 ## 致谢 + 本项目参考了以下开源项目,在此对相关项目和研究开发人员表示感谢。
    [GPTCache](https://github.com/zilliztech/GPTCache) ## Contributing + ModelCache是一个非常有趣且有用的项目,我们相信这个项目有很大的潜力,无论你是经验丰富的开发者,还是刚刚入门的新手,都欢迎你为这个项目做出一些贡献,包括但不限于:提交问题和建议,参与代码编写,完善文档和示例。你的参与将会使这个项目变得更好,同时也会为开源社区做出贡献。 \ No newline at end of file diff --git a/docs/1.what-is-model-cache.md b/docs/1.what-is-model-cache.md new file mode 100644 index 0000000..e69de29 diff --git a/docs/2.model-cache-features.md b/docs/2.model-cache-features.md new file mode 100644 index 0000000..e69de29 diff --git a/docs/3.model-cache-quick-start.md b/docs/3.model-cache-quick-start.md new file mode 100644 index 0000000..e69de29 diff --git a/docs/4.create-cache.md b/docs/4.create-cache.md new file mode 100644 index 0000000..e69de29 diff --git a/docs/5.configure-cache.md b/docs/5.configure-cache.md new file mode 100644 index 0000000..e69de29 From 2a0a00fa5063309e67f8ca3987d2674ac838ab45 Mon Sep 17 00:00:00 2001 From: "Amber.Z" Date: Wed, 18 Dec 2024 19:53:58 +0800 Subject: [PATCH 75/98] add more content --- README.md | 88 +++++++++++--------- docs/1.what-is-model-cache.md | 132 ++++++++++++++++++++++++++++++ docs/2.model-cache-features.md | 29 +++++++ docs/3.model-cache-quick-start.md | 97 ++++++++++++++++++++++ docs/4.create-cache.md | 42 ++++++++++ docs/5.configure-cache.md | 0 6 files changed, 351 insertions(+), 37 deletions(-) delete mode 100644 docs/5.configure-cache.md diff --git a/README.md b/README.md index 8e9489c..6520171 100644 --- a/README.md +++ b/README.md @@ -23,14 +23,14 @@ ModelCache - [Quick start](#quick-start) - [Dependencies](#dependencies) - [Start service](#start-service) - - [Start Demo](#start-demo) + - [Start demo](#start-demo) - [Start normal service](#start-normal-service) -- [Access the service](#access-the-service) +- [Visit the service](#visit-the-service) - [Write cache](#write-cache) - [Query cache](#query-cache) - [Clear cache](#clear-cache) - [Function comparison](#function-comparison) -- [Core-Features](#core-features) +- [Features](#features) - [Todo List](#todo-list) - [Adapter](#adapter) - [Embedding model\&inference](#embedding-modelinference) @@ -60,12 +60,12 @@ Codefuse-ModelCache is a semantic cache for large language models (LLMs). By cac You can find the start script in `flask4modelcache.py` and `flask4modelcache_demo.py`. -- `flask4modelcache_demo.py` is a quick test service that embeds sqlite and faiss. You do not need to be concerned about database-related matters. -- `flask4modelcache.py` is the normal service that requires configuration of MySQL and Milvus. +- `flask4modelcache_demo.py`: A quick test service that embeds SQLite and FAISS. No database configuration required. +- `flask4modelcache.py`: The standard service that requires MySQL and Milvus configuration. ### Dependencies -- Python: V3.8 and above +- Python: V3.8 or above - Package installation ```shell @@ -74,10 +74,10 @@ You can find the start script in `flask4modelcache.py` and `flask4modelcache_dem ### Start service -#### Start Demo +#### Start demo -1. Download the embedding model bin file on [Hugging Face](https://huggingface.co/shibing624/text2vec-base-chinese/tree/main). Place the downloaded bin file in the model/text2vec-base-chinese folder. -2. Start the backend service by using `flask4modelcache_dome.py`. +1. Download the embedding model bin file from [Hugging Face](https://huggingface.co/shibing624/text2vec-base-chinese/tree/main). Place it in the `model/text2vec-base-chinese` folder. +2. 
Start the backend service: ```shell cd CodeFuse-ModelCache @@ -89,19 +89,23 @@ You can find the start script in `flask4modelcache.py` and `flask4modelcache_dem #### Start normal service -Before you start normal service, make sure that you have completed these steps: +Before you start standard service, do these steps: -1. Install the relational database MySQL and import the SQL file to create the data tables. You can find the SQL file in `reference_doc/create_table.sql`. +1. Install MySQL and import the SQL file from `reference_doc/create_table.sql`. 2. Install vector database Milvus. -3. Add the database access information to the configuration files: - 1. `modelcache/config/milvus_config.ini` - 2. `modelcache/config/mysql_config.ini` -4. Download the embedding model bin file from [Hugging Face](https://huggingface.co/shibing624/text2vec-base-chinese/tree/main). Put the bin file in the `model/text2vec-base-chinese` directory. -5. Start the backend service by using the `flask4modelcache.py` script. +3. Configure database access in: + - `modelcache/config/milvus_config.ini` + - `modelcache/config/mysql_config.ini` +4. Download the embedding model bin file from [Hugging Face](https://huggingface.co/shibing624/text2vec-base-chinese/tree/main). Put it in `model/text2vec-base-chinese`. +5. Start the backend service: -## Access the service + ```bash + python flask4modelcache.py + ``` -The current service provides three core functionalities through RESTful API.: Cache-Writing, Cache-Querying, and Cache-Clearing. Demos: +## Visit the service + +The service provides three core RESTful API functionalities: Cache-Writing, Cache-Querying, and Cache-Clearing. ### Write cache @@ -273,25 +277,35 @@ We've implemented several key updates to our repository. We've resolved network -## Core-Features - -In ModelCache, we adopted the main idea of GPTCache, includes core modules: adapter, embedding, similarity, and data_manager. The adapter module is responsible for handling the business logic of various tasks and can connect the embedding, similarity, and data_manager modules. The embedding module is mainly responsible for converting text into semantic vector representations, it transforms user queries into vector form.The rank module is used for sorting and evaluating the similarity of the recalled vectors. The data_manager module is primarily used for managing the database. In order to better facilitate industrial applications, we have made architectural and functional upgrades as follows: - -- [x] We have modified it similar to Redis and embedded it into the LLMs product, providing semantic caching capabilities. This ensures that it does not interfere with LLM calls, security audits, and other functionalities, achieving compatibility with all large-scale model services. -- [x] Multiple Model Loading Schemes: - - Support loading local embedding models to address Hugging Face network connectivity issues. - - Support loading various pretrained model embedding layers. -- [x] Data Isolation Capability - - Environment Isolation: Can pull different database configurations based on the environment to achieve environment isolation (dev, prepub, prod). - - Multi-tenant Data Isolation: Dynamically create collections based on the model for data isolation, addressing data isolation issues in multi-model/services scenarios in LLMs products. -- [x] Support for System Commands: Adopting a concatenation approach to address the issue of system commands in the prompt format. 
-- [x] Differentiation of Long and Short Texts: Long texts pose more challenges for similarity evaluation. To address this, we have added differentiation between long and short texts, allowing for separate configuration of threshold values for determining similarity.
-- [x] Milvus Performance Optimization: The consistency_level of Milvus has been adjusted to "Session" level, which can result in better performance.
-- [x] Data Management Capability:
-  - Ability to clear the cache, used for data management after model upgrades.
-  - Hitquery recall for subsequent data analysis and model iteration reference.
-  - Asynchronous log write-back capability for data analysis and statistics.
-  - Added model field and data statistics field for feature expansion.
+## Features
+
+In ModelCache, we incorporated the core principles of GPTCache. ModelCache has four modules: adapter, embedding, similarity, and data_manager.
+
+- The adapter module orchestrates the business logic for various tasks and integrates the embedding, similarity, and data_manager modules.
+- The embedding module converts text into semantic vector representations, and transforms user queries into vectors.
+- The rank module ranks and evaluates the similarity of recalled vectors.
+- The data_manager module manages the databases.
+
+To make ModelCache more suitable for industrial use, we made several improvements to its architecture and functionality:
+
+- [x] Architectural adjustment (lightweight integration):
+  - Embedded into LLM products using a Redis-like caching mode.
+  - Provided semantic caching without interfering with LLM calls, security audits, and other functions.
+  - Compatible with all LLM services.
+- [x] Multiple model loading:
+  - Supported local embedding model loading, and resolved Hugging Face network connectivity issues.
+  - Supported loading embedding layers from various pre-trained models.
+- [x] Data isolation:
+  - Environment isolation: Read different database configurations based on the environment. Isolate development, staging, and production environments.
+  - Multi-tenant data isolation: Dynamically create collections based on models for data isolation, addressing data separation issues in multi-model/service scenarios within large language model products.
+- [x] Supported system instruction: Adopted a concatenation approach to resolve issues with system instructions in the prompt paradigm.
+- [x] Long and short text differentiation: Long texts bring more challenges for similarity assessment. Added differentiation between long and short texts, allowing for separate threshold configurations.
+- [x] Milvus performance optimization: Adjusted Milvus consistency level to "Session" level for better performance.
+- [x] Data management:
+  - One-click cache clearing to enable easy data management after model upgrades.
+  - Recall of hit queries for subsequent data analysis and model iteration reference.
+  - Asynchronous log write-back for data analysis and statistics.
+  - Added model field and data statistics field to enhance features.
 ## Todo List
diff --git a/docs/1.what-is-model-cache.md b/docs/1.what-is-model-cache.md
index e69de29..8fdbffa 100644
--- a/docs/1.what-is-model-cache.md
+++ b/docs/1.what-is-model-cache.md
@@ -0,0 +1,132 @@
+# What is ModelCache
+
+In ModelCache, we adopted the main idea of GPTCache, including core modules: adapter, embedding, similarity, and data_manager.
The adapter module is responsible for handling the business logic of various tasks and can connect the embedding, similarity, and data_manager modules. The embedding module is mainly responsible for converting text into semantic vector representations; it transforms user queries into vector form. The rank module is used for sorting and evaluating the similarity of the recalled vectors. The data_manager module is primarily used for managing the database. In order to better facilitate industrial applications, we have made architectural and functional upgrades as follows:
+
+## Architecture
+
+![modelcache modules](modelcache_modules_20240409.png)
+
+## Function comparison
+
+We've implemented several key updates to our repository. We've resolved network issues with Hugging Face and improved inference speed by introducing local embedding capabilities. Due to limitations in SqlAlchemy, we've redesigned our relational database interaction module for more flexible operations. We've added multi-tenancy support to ModelCache, recognizing the need for multiple users and models in LLM products. Lastly, we've made initial adjustments for better compatibility with system commands and multi-turn dialogues.
+
+| Module | Function | ModelCache | GPTCache |
+| ------ | -------- | ---------- | -------- |
+| Basic Interface | Data query interface | | |
+| Basic Interface | Data writing interface | | |
+| Embedding | Embedding model configuration | | |
+| Embedding | Large model embedding layer | | |
+| Embedding | BERT model long text processing | | |
+| Large model invocation | Decoupling from large models | | |
+| Large model invocation | Local loading of embedding model | | |
+| Data isolation | Model data isolation | | |
+| Data isolation | Hyperparameter isolation | | |
+| Databases | MySQL | | |
+| Databases | Milvus | | |
+| Databases | OceanBase | | |
+| Session management | Single-turn dialogue | | |
+| Session management | System commands | | |
+| Session management | Multi-turn dialogue | | |
+| Data management | Data persistence | | |
+| Data management | One-click cache clearance | | |
+| Tenant management | Support for multi-tenancy | | |
+| Tenant management | Milvus multi-collection capability | | |
+| Other | Long-short dialogue distinction | | |
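Putting the four modules above together, a cache lookup is essentially: embed the query, recall the nearest vectors, rank the candidates, and return the stored answer if the best one clears a threshold. The following is a simplified, self-contained sketch of that flow in plain Python; the class, function, and parameter names are illustrative stand-ins, not the actual ModelCache classes or APIs:

```python
from dataclasses import dataclass
from typing import Callable, List, Optional, Sequence, Tuple


@dataclass
class CacheEntry:
    question: str            # original cached query text
    answer: str              # cached model answer
    vector: Sequence[float]  # stored embedding


def query_cache(
    query: str,
    embed: Callable[[str], Sequence[float]],                      # embedding module
    recall: Callable[[Sequence[float], int], List[CacheEntry]],   # data_manager: vector recall
    rank: Callable[[str, str], float],                            # rank module: similarity score
    threshold: float = 0.9,                                       # illustrative threshold
    top_k: int = 5,
) -> Optional[str]:
    """Adapter-style orchestration: embed, recall, rank, then return a hit or a miss."""
    query_vector = embed(query)
    candidates = recall(query_vector, top_k)
    scored: List[Tuple[float, CacheEntry]] = [
        (rank(query, candidate.question), candidate) for candidate in candidates
    ]
    scored.sort(key=lambda item: item[0], reverse=True)
    if scored and scored[0][0] >= threshold:
        return scored[0][1].answer   # cache hit
    return None                      # cache miss: call the LLM and write the result back
```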
    diff --git a/docs/2.model-cache-features.md b/docs/2.model-cache-features.md index e69de29..2294d03 100644 --- a/docs/2.model-cache-features.md +++ b/docs/2.model-cache-features.md @@ -0,0 +1,29 @@ +# ModelCache features + +This topic describes ModelCache features. In ModelCache, we incorporated the core principles of GPTCache. ModelCache has four modules: adapter, embedding, similarity, and data_manager. + +- The adapter module orchestrates the business logic for various tasks, integrate the embedding, similarity, and data_manager modules. +- The embedding module converts text into semantic vector representations, and transforms user queries into vectors. +- The rank module ranks and evaluate the similarity of recalled vectors. +- The data_manager module manages the databases. + +To make ModelCache more suitable for industrial use, we made several improvements to its architecture and functionality: + +- [x] Architectural adjustment (lightweight integration): + - Embedded into LLM products using a Redis-like caching mode. + - Provided semantic caching without interfering with LLM calls, security audits, and other functions. + - Compatible with all LLM services. +- [x] Multiple model loading: + - Supported local embedding model loading, and resolved Hugging Face network connectivity issues. + - Supported loading embedding layers from various pre-trained models. +- [x] Data isolation + - Environment isolation: Read different database configurations based on the environment. Isolate development, staging, and production environments. + - Multi-tenant data isolation: Dynamically create collections based on models for data isolation, addressing data separation issues in multi-model/service scenarios within large language model products. +- [x] Supported system instruction: Adopted a concatenation approach to resolve issues with system instructions in the prompt paradigm. +- [x] Long and short text differentiation: Long texts bring more challenges for similarity assessment. Added differentiation between long and short texts, allowing for separate threshold configurations. +- [x] Milvus performance optimization: Adjusted Milvus consistency level to "Session" level for better performance. +- [x] Data management: + - One-click cache clearing to enable easy data management after model upgrades. + - Recall of hit queries for subsequent data analysis and model iteration reference. + - Asynchronous log write-back for data analysis and statistics. + - Added model field and data statistics field to enhance features. diff --git a/docs/3.model-cache-quick-start.md b/docs/3.model-cache-quick-start.md index e69de29..7cd3903 100644 --- a/docs/3.model-cache-quick-start.md +++ b/docs/3.model-cache-quick-start.md @@ -0,0 +1,97 @@ +# Quick start + +This topic describes how to set up and use ModelCache. + +You can find the start script in `flask4modelcache.py` and `flask4modelcache_demo.py`. + +- `flask4modelcache_demo.py`: A quick test service that embeds SQLite and FAISS. No database configuration required. +- `flask4modelcache.py`: The standard service that requires MySQL and Milvus configuration. + +## Dependencies + +- Python: V3.8 or above +- Package installation + + ```shell + pip install -r requirements.txt + ``` + +## Start service + +### Start demo + +1. Download the embedding model bin file from [Hugging Face](https://huggingface.co/shibing624/text2vec-base-chinese/tree/main). Place it in the `model/text2vec-base-chinese` folder. +2. 
Start the backend service: + + ```shell + cd CodeFuse-ModelCache + ``` + + ```shell + python flask4modelcache_demo.py + ``` + +### Start standard service + +Before you start standard service, do these steps: + +1. Install MySQL and import the SQL file from `reference_doc/create_table.sql`. +2. Install vector database Milvus. +3. Configure database access in: + - `modelcache/config/milvus_config.ini` + - `modelcache/config/mysql_config.ini` +4. Download the embedding model bin file from [Hugging Face](https://huggingface.co/shibing624/text2vec-base-chinese/tree/main). Put it in `model/text2vec-base-chinese`. +5. Start the backend service: + + ```bash + python flask4modelcache.py + ``` + +## Visit the service + +The service provides three core RESTful API functionalities: Cache-Writing, Cache-Querying, and Cache-Clearing. + +### Write cache + +```python +import json +import requests +url = 'http://127.0.0.1:5000/modelcache' +type = 'insert' +scope = {"model": "CODEGPT-1008"} +chat_info = [{"query": [{"role": "system", "content": "You are an AI code assistant and you must provide neutral and harmless answers to help users solve code-related problems."}, {"role": "user", "content": "你是谁?"}], + "answer": "Hello, I am an intelligent assistant. How can I assist you?"}] +data = {'type': type, 'scope': scope, 'chat_info': chat_info} +headers = {"Content-Type": "application/json"} +res = requests.post(url, headers=headers, json=json.dumps(data)) +``` + +### Query cache + +```python +import json +import requests +url = 'http://127.0.0.1:5000/modelcache' +type = 'query' +scope = {"model": "CODEGPT-1008"} +query = [{"role": "system", "content": "You are an AI code assistant and you must provide neutral and harmless answers to help users solve code-related problems."}, {"role": "user", "content": "Who are you?"}] +data = {'type': type, 'scope': scope, 'query': query} + +headers = {"Content-Type": "application/json"} +res = requests.post(url, headers=headers, json=json.dumps(data)) +``` + +### Clear cache + +```python +import json +import requests +url = 'http://127.0.0.1:5000/modelcache' +type = 'remove' +scope = {"model": "CODEGPT-1008"} +remove_type = 'truncate_by_model' +data = {'type': type, 'scope': scope, 'remove_type': remove_type} + +headers = {"Content-Type": "application/json"} +res = requests.post(url, headers=headers, json=json.dumps(data)) +``` diff --git a/docs/4.create-cache.md b/docs/4.create-cache.md index e69de29..e0506ba 100644 --- a/docs/4.create-cache.md +++ b/docs/4.create-cache.md @@ -0,0 +1,42 @@ +# Create cache + +This topic describes how to create cache. + +## Default cache interface + +```py +class Cache: + # ModelCache calls it whe you start the cache system + def __init__(self): + self.has_init = False + self.cache_enable_func = None + self.embedding_func = None + self.post_process_messages_func = None + self.config = Config() +``` + +This function embeds text into dense vectors for context similarity search. ModelCache supports these embedding context methods: Huggingface, ONNX, and SentenceTransformers. The default model is text2vec Hugging Face because it performs better for Chinese. Simply initialize your embedding function as `text2vec.to_embeddings`. 
+ +```py +data_manager = get_data_manager(CacheBase("mysql", config=mysql_config), + VectorBase("milvus", dimension=data2vec.dimension, milvus_config=milvus_config)) + +cache.init( + embedding_func=data2vec.to_embeddings, + data_manager=data_manager, + similarity_evaluation=SearchDistanceEvaluation(), + query_pre_embedding_func=query_multi_splicing, + insert_pre_embedding_func=insert_multi_splicing, +) +``` + +data_manager CacheVase stores all scalar data, such as original questions, prompts, answers, and access times. ModelCache supports multiple cache storages like SQLite, MySQL, and OceanBase. NoSQL databases will be supported in the future. + +data_manager VectorBase stores and searches all embedding vectors to find semantically similar results. ModelCache supports using vector search libraries like FAISS or vector databases like Milvus. More vector database and cloud service will be supported in the future. + +## Examples + +```py +data_manager = get_data_manager(CacheBase("sqlite"), VectorBase("faiss", dimension=data2vec.dimension)) +data_manager = get_data_manager(CacheBase("oceanbase"), VectorBase("milvus", dimension=data2vec.dimension)) +``` diff --git a/docs/5.configure-cache.md b/docs/5.configure-cache.md deleted file mode 100644 index e69de29..0000000 From ac759ab2450e0add41fdcd7113708809e2792f32 Mon Sep 17 00:00:00 2001 From: charleschile Date: Wed, 18 Dec 2024 22:39:45 +0800 Subject: [PATCH 76/98] bge-m3 embedding generator --- modelcache/embedding/__init__.py | 6 +++++- modelcache/embedding/bge_m3.py | 28 ++++++++++++++++++++++++++++ 2 files changed, 33 insertions(+), 1 deletion(-) create mode 100644 modelcache/embedding/bge_m3.py diff --git a/modelcache/embedding/__init__.py b/modelcache/embedding/__init__.py index 5684a2d..a7ab511 100644 --- a/modelcache/embedding/__init__.py +++ b/modelcache/embedding/__init__.py @@ -7,6 +7,7 @@ paddlenlp = LazyImport("paddlenlp", globals(), "modelcache.embedding.paddlenlp") timm = LazyImport("timm", globals(), "modelcache.embedding.timm") huggingface_tei = LazyImport("huggingface_tei", globals(), "modelcache.embedding.huggingface_tei") +bge_m3 = LazyImport("bge_m3", globals(), "modelcache.embedding.bge_m3") def Huggingface(model="sentence-transformers/all-mpnet-base-v2"): @@ -33,4 +34,7 @@ def Timm(model="resnet50", device="default"): return timm.Timm(model, device) def HuggingfaceTEI(base_url, model): - return huggingface_tei.HuggingfaceTEI(base_url, model) \ No newline at end of file + return huggingface_tei.HuggingfaceTEI(base_url, model) + +def BgeM3Embedding(model_path="model/bge-m3"): + return bge_m3.BgeM3Embedding(model_path) \ No newline at end of file diff --git a/modelcache/embedding/bge_m3.py b/modelcache/embedding/bge_m3.py new file mode 100644 index 0000000..10cc15c --- /dev/null +++ b/modelcache/embedding/bge_m3.py @@ -0,0 +1,28 @@ +# -*- coding: utf-8 -*- +import numpy as np +from modelcache.embedding.base import BaseEmbedding +from transformers import AutoTokenizer, AutoModel +from FlagEmbedding import BGEM3FlagModel + +class BgeM3Embedding(BaseEmbedding): + def __init__(self, model_path: str = "model/bge-m3"): + self.tokenizer = AutoTokenizer.from_pretrained(model_path) + self.model = AutoModel.from_pretrained(model_path) + + self.bge_model = BGEM3FlagModel(model_name_or_path=model_path, + model=self.model, + tokenizer=self.tokenizer, + use_fp16=False) + + self.__dimension = 768 + + def to_embeddings(self, data, **_): + if not isinstance(data, list): + data = [data] + + embeddings = self.bge_model.encode(data, 
batch_size=12, max_length=8192)['dense_vecs'] + return np.array(embeddings).astype("float32") + + @property + def dimension(self): + return self.__dimension \ No newline at end of file From 992f3b9aa67aa9000992f26b4390ab7c5b58bca2 Mon Sep 17 00:00:00 2001 From: charleschile Date: Wed, 18 Dec 2024 23:11:23 +0800 Subject: [PATCH 77/98] reranker --- modelcache/adapter/adapter_query.py | 141 +++++++++++++++++++--------- 1 file changed, 96 insertions(+), 45 deletions(-) diff --git a/modelcache/adapter/adapter_query.py b/modelcache/adapter/adapter_query.py index a8c90f1..e3be30a 100644 --- a/modelcache/adapter/adapter_query.py +++ b/modelcache/adapter/adapter_query.py @@ -5,7 +5,9 @@ from modelcache.utils.error import NotInitError from modelcache.utils.time import time_cal from modelcache.processor.pre import multi_analysis +from FlagEmbedding import FlagReranker +USE_RERANKER = True # 如果为 True 则启用 reranker,否则使用原有逻辑 def adapt_query(cache_data_convert, *args, **kwargs): chat_cache = kwargs.pop("cache_obj", cache) @@ -74,53 +76,102 @@ def adapt_query(cache_data_convert, *args, **kwargs): if rank_pre < rank_threshold: return - for cache_data in cache_data_list: - primary_id = cache_data[1] - ret = chat_cache.data_manager.get_scalar_data( - cache_data, extra_param=context.get("get_scalar_data", None) - ) - if ret is None: - continue + if USE_RERANKER: + reranker = FlagReranker('BAAI/bge-reranker-v2-m3', use_fp16=False) + for cache_data in cache_data_list: + primary_id = cache_data[1] + ret = chat_cache.data_manager.get_scalar_data( + cache_data, extra_param=context.get("get_scalar_data", None) + ) + if ret is None: + continue - if "deps" in context and hasattr(ret.question, "deps"): - eval_query_data = { - "question": context["deps"][0]["data"], - "embedding": None - } - eval_cache_data = { - "question": ret.question.deps[0].data, - "answer": ret.answers[0].answer, - "search_result": cache_data, - "embedding": None, - } - else: - eval_query_data = { - "question": pre_embedding_data, - "embedding": embedding_data, - } + rank = reranker.compute_score([pre_embedding_data, ret[0]], normalize=True) - eval_cache_data = { - "question": ret[0], - "answer": ret[1], - "search_result": cache_data, - "embedding": None - } - rank = chat_cache.similarity_evaluation.evaluation( - eval_query_data, - eval_cache_data, - extra_param=context.get("evaluation_func", None), - ) + if "deps" in context and hasattr(ret.question, "deps"): + eval_query_data = { + "question": context["deps"][0]["data"], + "embedding": None + } + eval_cache_data = { + "question": ret.question.deps[0].data, + "answer": ret.answers[0].answer, + "search_result": cache_data, + "embedding": None, + } + else: + eval_query_data = { + "question": pre_embedding_data, + "embedding": embedding_data, + } + + eval_cache_data = { + "question": ret[0], + "answer": ret[1], + "search_result": cache_data, + "embedding": None + } + + if len(pre_embedding_data) <= 256: + if rank_threshold <= rank: + cache_answers.append((rank, ret[1])) + cache_questions.append((rank, ret[0])) + cache_ids.append((rank, primary_id)) + else: + if rank_threshold_long <= rank: + cache_answers.append((rank, ret[1])) + cache_questions.append((rank, ret[0])) + cache_ids.append((rank, primary_id)) + else: + # 不使用 reranker 时,走原来的逻辑 + for cache_data in cache_data_list: + primary_id = cache_data[1] + ret = chat_cache.data_manager.get_scalar_data( + cache_data, extra_param=context.get("get_scalar_data", None) + ) + if ret is None: + continue + + if "deps" in context and hasattr(ret.question, 
"deps"): + eval_query_data = { + "question": context["deps"][0]["data"], + "embedding": None + } + eval_cache_data = { + "question": ret.question.deps[0].data, + "answer": ret.answers[0].answer, + "search_result": cache_data, + "embedding": None, + } + else: + eval_query_data = { + "question": pre_embedding_data, + "embedding": embedding_data, + } + + eval_cache_data = { + "question": ret[0], + "answer": ret[1], + "search_result": cache_data, + "embedding": None + } + rank = chat_cache.similarity_evaluation.evaluation( + eval_query_data, + eval_cache_data, + extra_param=context.get("evaluation_func", None), + ) + + if len(pre_embedding_data) <= 256: + if rank_threshold <= rank: + cache_answers.append((rank, ret[1])) + cache_questions.append((rank, ret[0])) + cache_ids.append((rank, primary_id)) + else: + if rank_threshold_long <= rank: + cache_answers.append((rank, ret[1])) + cache_questions.append((rank, ret[0])) + cache_ids.append((rank, primary_id)) - if len(pre_embedding_data) <= 256: - if rank_threshold <= rank: - cache_answers.append((rank, ret[1])) - cache_questions.append((rank, ret[0])) - cache_ids.append((rank, primary_id)) - else: - if rank_threshold_long <= rank: - cache_answers.append((rank, ret[1])) - cache_questions.append((rank, ret[0])) - cache_ids.append((rank, primary_id)) cache_answers = sorted(cache_answers, key=lambda x: x[0], reverse=True) cache_questions = sorted(cache_questions, key=lambda x: x[0], reverse=True) cache_ids = sorted(cache_ids, key=lambda x: x[0], reverse=True) @@ -141,4 +192,4 @@ def adapt_query(cache_data_convert, *args, **kwargs): logging.info('update_hit_count except, please check!') chat_cache.report.hint_cache() - return cache_data_convert(return_message, return_query) + return cache_data_convert(return_message, return_query) \ No newline at end of file From e6ef83c757dd0dff13c8074bdc1919a8c0451d9d Mon Sep 17 00:00:00 2001 From: "Amber.Z" Date: Fri, 20 Dec 2024 14:46:34 +0800 Subject: [PATCH 78/98] update --- README.md | 3 +- README_CN.md | 85 +++++++++++++++++++++++++++++++--------------------- 2 files changed, 53 insertions(+), 35 deletions(-) diff --git a/README.md b/README.md index 6520171..c500d7d 100644 --- a/README.md +++ b/README.md @@ -43,6 +43,7 @@ ModelCache ## News +- 🔥🔥[2024.10.22] Added tasks for 1024 developer day. - 🔥🔥[2024.04.09] Added Redis Search to store and retrieve embeddings in multi-tenant. This can reduce the interaction time between Cache and vector databases to 10ms. - 🔥🔥[2023.12.10] Integrated LLM embedding frameworks such as 'llmEmb', 'ONNX', 'PaddleNLP', 'FastText', and the image embedding framework 'timm' to bolster embedding functionality. - 🔥🔥[2023.11.20] Integrated local storage, such as sqlite and faiss. This enables you to initiate quick and convenient tests. @@ -60,7 +61,7 @@ Codefuse-ModelCache is a semantic cache for large language models (LLMs). By cac You can find the start script in `flask4modelcache.py` and `flask4modelcache_demo.py`. -- `flask4modelcache_demo.py`: A quick test service that embeds SQLite and FAISS. No database configuration required. +- `flask4modelcache_demo.py`: A quick test service that embeds SQLite and FAISS. No database configuration required. - `flask4modelcache.py`: The standard service that requires MySQL and Milvus configuration. 
### Dependencies diff --git a/README_CN.md b/README_CN.md index 89c3fad..b2e70a9 100644 --- a/README_CN.md +++ b/README_CN.md @@ -16,19 +16,35 @@ ModelCache ## Contents +- [Contents](#contents) - [新闻](#新闻) - [项目简介](#项目简介) +- [架构大图](#架构大图) - [快速部署](#快速部署) + - [环境依赖](#环境依赖) + - [启动服务](#启动服务) + - [启动 Demo](#启动-demo) + - [启动标准服务](#启动标准服务) - [服务访问](#服务访问) + - [写入 cache](#写入-cache) + - [查询 cache](#查询-cache) + - [清空 cache](#清空-cache) - [文章](#文章) -- [架构大图](#架构大图) +- [功能对比](#功能对比) - [核心功能](#核心功能) +- [Todo List](#todo-list) + - [Adapter](#adapter) + - [Embedding model\&inference](#embedding-modelinference) + - [Scalar Storage](#scalar-storage) + - [Vector Storage](#vector-storage) + - [Ranking](#ranking) + - [Service](#service) - [致谢](#致谢) -- [Contributing](#Contributing) +- [Contributing](#contributing) ## 新闻 -- 🔥🔥[2024.10.22] 增加1024程序员节任务。 +- 🔥🔥[2024.10.22] 增加1024程序员节任务。 - 🔥🔥[2024.04.09] 增加了多租户场景中Redis Search存储和检索embedding的能力,可以将Cache和向量数据库的交互耗时降低至10ms内。 - 🔥🔥[2023.12.10] 增加llmEmb、onnx、paddlenlp、fasttext等LLM embedding框架,并增加timm 图片embedding框架,用于提供更丰富的embedding能力。 - 🔥🔥[2023.11.20] codefuse-ModelCache增加本地存储能力, 适配了嵌入式数据库sqlite、faiss,方便用户快速启动测试。 @@ -38,53 +54,58 @@ ModelCache Codefuse-ModelCache 是一个开源的大模型语义缓存系统,通过缓存已生成的模型结果,降低类似请求的响应时间,提升用户体验。该项目从服务优化角度出发,引入缓存机制,在资源有限和对实时性要求较高的场景下,帮助企业和研究机构降低推理部署成本、提升模型性能和效率、提供规模化大模型服务。我们希望通过开源,分享交流大模型语义Cache的相关技术。 +## 架构大图 + +![modelcache modules](docs/modelcache_modules_20240409.png) + ## 快速部署 -项目中启动服务脚本分为flask4modelcache.py 和 flask4modelcache_demo.py,其中: +项目中启动服务脚本分为 `flask4modelcache.py` 和 `flask4modelcache_demo.py`,其中: -- flask4modelcache_demo.py 为快速测试服务,内嵌了sqlite和faiss,用户无需关心数据库相关事宜。 -- flask4modelcache.py 为正常服务,需用户具备mysql和milvus等数据库服务。 +- `flask4modelcache_demo.py` 为快速测试服务,内嵌了 SQLite 和 FAISS,用户无需关心数据库相关事宜。 +- `flask4modelcache.py` 为正常服务,需用户具备 MySQL 和 Milvus 等数据库服务。 ### 环境依赖 -- python版本: 3.8及以上 +- python版本: 3.8 及以上 - 依赖包安装: ```shell pip install -r requirements.txt ``` -### 服务启动 +### 启动服务 -#### Demo服务启动 +#### 启动 Demo -- 离线模型bin文件下载, 参考地址:[https://huggingface.co/shibing624/text2vec-base-chinese/tree/main](https://huggingface.co/shibing624/text2vec-base-chinese/tree/main),并将下载的bin文件,放到 model/text2vec-base-chinese 文件夹中。 -- 执行flask4modelcache_demo.py启动服务。 +- 离线模型 bin 文件下载, 参考地址:[Hugging Face](https://huggingface.co/shibing624/text2vec-base-chinese/tree/main),并将下载的 bin 文件,放到 `model/text2vec-base-chinese` 文件夹中。 +- 执行 `flask4modelcache_demo.py` 启动服务。 -```shell -cd CodeFuse-ModelCache -``` -```shell -python flask4modelcache_demo.py -``` + ```shell + cd CodeFuse-ModelCache + ``` + + ```shell + python flask4modelcache_demo.py + ``` -#### 正常服务启动 +#### 启动标准服务 -在启动服务前,应该进行如下环境配置: +在启动标准服务前,应该进行如下环境配置: -1. 安装关系数据库 mysql, 导入sql创建数据表,sql文件:```reference_doc/create_table.sql``` -2. 安装向量数据库milvus +1. 安装关系数据库 MySQL, 导入 SQL 创建数据表,MySQL 文件:```reference_doc/create_table.sql```。 +2. 安装向量数据库 Milvus。 3. 在配置文件中添加数据库访问信息,配置文件为: 1. ```modelcache/config/milvus_config.ini``` 2. ```modelcache/config/mysql_config.ini``` -4. 离线模型bin文件下载, 参考地址:[https://huggingface.co/shibing624/text2vec-base-chinese/tree/main](https://huggingface.co/shibing624/text2vec-base-chinese/tree/main),并将下载的bin文件,放到 model/text2vec-base-chinese 文件夹中 +4. 离线模型 bin 文件下载, 参考地址:[Hugging Face](https://huggingface.co/shibing624/text2vec-base-chinese/tree/main),并将下载的 bin 文件,放到 `model/text2vec-base-chinese` 文件夹中。 5. 
通过flask4modelcache.py脚本启动后端服务。 ## 服务访问 -当前服务以restful API方式提供3个核心功能:数据写入,cache查询和cache数据清空。请求demo 如下: +当前服务以 restful API 方式提供 3 个核心功能:数据写入,cache 查询和 cache 数据清空。请求 demo 如下: -### cache写入 +### 写入 cache ```python import json @@ -99,7 +120,7 @@ headers = {"Content-Type": "application/json"} res = requests.post(url, headers=headers, json=json.dumps(data)) ``` -### cache查询 +### 查询 cache ```python import json @@ -114,7 +135,7 @@ headers = {"Content-Type": "application/json"} res = requests.post(url, headers=headers, json=json.dumps(data)) ``` -### cache清空 +### 清空 cache ```python import json @@ -133,13 +154,9 @@ res = requests.post(url, headers=headers, json=json.dumps(data)) https://mp.weixin.qq.com/s/ExIRu2o7yvXa6nNLZcCfhQ -## 架构大图 - -![modelcache modules](docs/modelcache_modules_20240409.png) - ## 功能对比 -功能方面,为了解决huggingface网络问题并提升推理速度,增加了embedding本地推理能力。鉴于SqlAlchemy框架存在一些限制,我们对关系数据库交互模块进行了重写,以更灵活地实现数据库操作。在实践中,大型模型产品需要与多个用户和多个模型对接,因此在ModelCache中增加了对多租户的支持,同时也初步兼容了系统指令和多轮会话。 +功能方面,为了解决 Hugging Face 网络问题并提升推理速度,增加了 embedding 本地推理能力。鉴于 SqlAlchemy 框架存在一些限制,我们对关系数据库交互模块进行了重写,以更灵活地实现数据库操作。在实践中,大型模型产品需要与多个用户和多个模型对接,因此在 ModelCache 中增加了对多租户的支持,同时也初步兼容了系统指令和多轮会话。 @@ -264,7 +281,7 @@ https://mp.weixin.qq.com/s/ExIRu2o7yvXa6nNLZcCfhQ ## 核心功能 -在ModelCache中,沿用了GPTCache的主要思想,包含了一系列核心模块:adapter、embedding、similarity和data_manager。adapter模块主要功能是处理各种任务的业务逻辑,并且能够将embedding、similarity、data_manager等模块串联起来;embedding模块主要负责将文本转换为语义向量表示,它将用户的查询转换为向量形式,并用于后续的召回或存储操作;rank模块用于对召回的向量进行相似度排序和评估;data_manager模块主要用于管理数据库。同时,为了更好的在工业界落地,我们做了架构和功能上的升级,如下: +在ModelCache 中,沿用了 GPTCache 的主要思想,包含了一系列核心模块:adapter、embedding、similarity 和 data_manager。adapter模块主要功能是处理各种任务的业务逻辑,并且能够将 embedding、similarity、data_manager等模块串联起来;embedding 模块主要负责将文本转换为语义向量表示,它将用户的查询转换为向量形式,并用于后续的召回或存储操作;rank 模块用于对召回的向量进行相似度排序和评估;data_manager 模块主要用于管理数据库。同时,为了更好的在工业界落地,我们做了架构和功能上的升级,如下: - [x] 架构调整(轻量化集成):以类redis的缓存模式嵌入到大模型产品中,提供语义缓存能力,不会干扰LLM调用和安全审核等功能,适配所有大模型服务。 - [x] 多种模型加载方案: @@ -286,11 +303,11 @@ https://mp.weixin.qq.com/s/ExIRu2o7yvXa6nNLZcCfhQ ### Adapter -- [ ] register adapter for Milvus:根据scope中的model参数,初始化对应Collection 并且执行load操作。 +- [ ] register adapter for Milvus:根据 scope 中的 model 参数,初始化对应 Collection 并且执行 load 操作。 ### Embedding model&inference -- [ ] inference优化:优化embedding推理速度,适配fastertransformer, TurboTransformers, ByteTransformer等推理引擎。 +- [ ] inference 优化:优化 embedding 推理速度,适配fastertransformer、TurboTransformers 和 ByteTransformer 等推理引擎。 - [ ] 兼容huggingface模型和modelscope模型,提供更多模型加载方式。 ### Scalar Storage From 19873489f564a0c229c2f5734f8e2b82afcfca04 Mon Sep 17 00:00:00 2001 From: "Amber.Z" Date: Fri, 20 Dec 2024 15:27:59 +0800 Subject: [PATCH 79/98] add MultiModal Cache --- .../cache-service-cost-time-distribution.webp | Bin 0 -> 11504 bytes docs/time-cost-comparison.webp | Bin 0 -> 64626 bytes mulicache-readme-cn.md | 127 ++++++++++++++++++ 3 files changed, 127 insertions(+) create mode 100644 docs/cache-service-cost-time-distribution.webp create mode 100644 docs/time-cost-comparison.webp create mode 100644 mulicache-readme-cn.md diff --git a/docs/cache-service-cost-time-distribution.webp b/docs/cache-service-cost-time-distribution.webp new file mode 100644 index 0000000000000000000000000000000000000000..d22a1f42a755560a9b7355b0578275ed38644581 GIT binary patch literal 11504 zcma)CW0YiTmW{M4ZQHhO+qP}nwr!(Qm3CI8ZKJZ%cIH%1Pxrj(p7&PAue(;9d%uId z_t`gAC`pQm{iFc^)J269)D$=fHhy0-gaYLPQE7qA0`u9jrAw9&6%`UBb36sWMOxZ@ zex9NBMn3mpZJ#}a-;<^39P;{~T-%-{zg>{KeY+_yh(G_R^QU?R_-EeT{}i5Z&z66M zSLSofi{Y#6{qZiqW#UBOqyG+ZUEC*s?!U%w{I>t5eB|Hir}ClfBkpgw8=c=f@q+#q 
zbPqn*$a}&@tG#dbuPv`({v+QQyQcfFa=e#)b9q_RvL?$Z4g16QsMXyix_!NQ=a>|6 z3PF+u#?P#kuioe4hqqhhnihg-@_l(23T=7Elz_6C+u&NM1dwB?|H9-B1#QJwr|}eulLxzTCxqT>2W3b^ zA^DCz6}5$txL-0qwB^_QtbJCUmT+RS!oNek8A(vC(@K#!@u96fG2ba^_{)<%b6`97 zIkDqbeM{seyQ(O#ii)S$LxLdG7u$$OCgG1f*Ouwm2svc*9@sS0d{e1g_eY9~Rf=XmuhHMe@jYPBNSXU~}vkE43)jX*0q% zOwOD6sarkcF_7T5HsLAxECJS^^qw6)D3iH@Tjy*8H7n^6z&0l15)cqlF-s{*!mJYj z(ECnncE6O)a8;hT?yg+zmE&Y@&I0V7bIa3SYmDBdK3p3IAM<(jcAWIQi6^4q-aWEY zaE;!`Tnek6=toDlBx|ymeQKnE#^wnw8*B~JQ-{SBv0P+@rCDQal~)GmXxg&(sjqMb zXj&g)HD1Ep)>!zpQV&t8ZG<(-&Ir!(!k;Qlx8BSZ{gy9Pga|Vsukc39b2^U zD3qUC0cfBOl<&lrs9{K2sb`shIRXXb*v~!Nj??Z_@rdtY+$~$YR$J-;zXE+*+`m=e zfMr-!TgFZasCr`s9k78HN@#4NxRTE|p(^?Feqx7o%OCcSGQ?2+%0BDtuPnfXO7Op_ zO?Giruh59N8eR`uU^1I7JbDU<7u=YK?ncV{qQHDh>fX!jDy)rE>yaox1Q6QB(N zHTn?)*A3SeAOerdZjOpttbT9xgZ$-Zj@^3F6B={~ zD;jD#rU*2bY2{!ZO#SAHibK5nJEFqntrctunk%u2+3k6o5X1;?KSu)(j1Ns^3aXWU z?goFBnVLv&0K9s%1*&?zrF`FO`rP0Ah(r21&ETpC!>(#nb^Z%wg1V=#&0P!Nkz5WEij2u_@lo-Y11%PoKPy7AJA`m}^eu7Q%83VZGYxA;eVoh;0k*LZu}-kMuPo zF%jb2Q#mLxAu4MMhjzU+BTL^ADe0ZgAX%-xFHTRo98PWi3U|PJZ^X2ALd4TaPs(C4 z2Pf25(`h5^(9y*ZO;~_6sWBk1c?O5n*dXf^HMdJ^f>2d_X-(oj=yI8T>|-s47vgT83W_=BMXb{V)3uK@AKEqzsUw4&ojjq=M<&8t#*ZKTxuo({Yu2Q<4vWmcP zn`BVFG#T}OVVQU{i7QQxlw=Tu>9wCzpL4C0Et2lt$3Sw<6#6W^$XxTq*@51)LD_il zUl z^ax5V1}|5|=$*c0iGG(jv85EmaAM*JGZ#~(8h*ka>6$&{)B<)Te6X=N@~9DsVRlzP zBhL@1H-#{B+D;b;J}-;Uxekt1MhDx}6X|B^OdO?&1ry&b?fcizLW2r)pJ$Xl?d*t?ie{|X$kR^ zaVQ|&Quh`%b4lAUs?w&5cG384!d@T%HJ)N4nAPjpWtf}Z63|P0o+2`qBVPj0XB?7# z(2Dbj|H*y>K!Mhu0XuqkZ-{aWEJfb(4@oaFV1?ZP#S11r9Q5C#6CV8*+)J%wPAXJ3 zqQt={9)d^fdlVpfRRn(h&f*=HwY(hk3g_|&JyC#|2_GyPS_h- zCt1Q-ANif(=B8V1e>Mr&4wrF?cma{{%-cA*mWc3+A%RYD7IyGPn{cFF48=2`l`$e>u#|qL z2plGd@J;neG9DD>5NUWA>u7VsXD@L)yZLX91;9b@F2%52vH~~2a@mu)ylW`$(FYWC zRdwa)IO$(u)x9PyUrPDWj(0$Gz}NH8W5>UhfS$pun&Vkei$=mI0)p}kbi;l<;|9!% z(WI9j?m!P8A z#T01!w<@~5^ZfhYh*$?dcLUiTxVehIr>Ule04yeG^;c9q|2hfEWhx6oR{%i7X;z8{ z6;%}k$jt=<>2uR+_RRVkh5C1Eb8~F4lqIvomU~PnC$O-C)s{8$PogQj8N5qc5}gZA zyK{9}jGiIic*J7lkQmRhV2n>JPRNx9`&FqpZcYeKPu=_lV%VJ9iNuU6h=~3Pj9zVz z+=qq?jb9KTJxc`Mj~~=-18`hSRpmydVI{rxlMQll2Z-GuQU12e*P>*-=rA7S07`?s7}%qM1{>cY6t=I!jMuxCdN z`BcmR;G6&yLp0?EdB(k58LT#FP2OL$h*Uv^-PodW+DM^Z51&9pe}-EWg#^s?QgKr) z{Q>XwvF;_$uCpH|lQk?EAQj!4TX{~QK7pdzZblMHk8Vo+Y(qL4SQB@939Mq7J!*xe zZH4-`(jZoj6yfTg@}ba?N@7!B_m9cn9MDlH8Cjvcr~)+v`}53nOU}%~sj)T;`Pq?? 
z5wsj^oVpxh6CPQlbPwxfCTf)e_UYswtkapq-!rV|Z+*d~ieOwE3Ls;crcC+-E$B_J z4GHc>^+L2=5To$>%P3yJ%(WpHw{h0IDSt0;HAvSXnwrLOAM6Pr(3HZ_S?>TqWKeWK zkQZUgZe`S?yLs2Yh74(F!!?l2eu1tkSB`VWhCjOcM!TaBP9!#5gwaD@80^OThXt?N z0)8T1QhDW#bK=;{h!1VU9hox3T^Mhx`cR3nWyxyoTZ%y$uhNjrJ`)rB8i6}eX zx&z!fxQ##jU0BVR+?pAps~pC6riY@$1fAlzAC2tUr68e0E1lOVrJMWxlxtbp zQP*pfmEeXlyr2KYDRk+nU>#}sD_v_k$R@jDOwrckqXamtk&hyStJBs$-u0 z>J(8mi1#J~HaZP@n&RU5ZVceD2BZM2xfVmactBS%B#GG{jgFgwkcpn7x*U_H!E7`G zVKf|au!5FBA%j2%B9fjs%0Vd!=mi&N!WL~!MqcKQDSqVL5^?Tst5sfucvvz)W!Wox ztsvpBn}Toxs^Q6%dLNC9Tv`a1)6xPXGZ?VUNSGbhiV@w05C9L`puDw$V_lr{W#8iL z9PCTQO;xZxTg}Vn-r9G*ch+^+++J;E)7XYTCdW6cL2gqc*j)C#Dy>zgK zYik)!xq82=`I>>YqmhdwDIz~H$m}0Mn251Ld)9B`G_;s*a`#;z4cx+ ztc&DzgV1|>eNi+`S>x)D5uQM#FX)>q@$W+H~c-?y-?!m~Vojt{RyViUsn61prSi zi@+-BjB74Lewy5N+VI&*VQP54Os9~ex-9uux?&FgJSXfemttGj(hR2^DWQQtt;)9> zVlu`vyz1SX)@2~Ei8pu#Az!=2qujO1-G zW?eec#s@WqUU7-yMT^O{`^71nS}5O?()H8a^d4X5GTj&jp475)$gJ|eksZUU=SxYt z9~gA+oGc?dskWy6F7(W>tW@U|H$T-o9NE4E!%rb~;4LUvHf%X8CUy4OK1Z02?&yzq zu{3yQ97n}CJfh7tCmk>=300s$8?_yyF0VhGREyf!T3m!@#8y}Xo&Oe6^M!GpBKjRW zVzcI&z^jU*u(sGp;TJ*e^T!{Y+!zoRVX7V~H=-31Fpl36GKQL;Kc?vs=u{kFJAN)I z6LxBPTCZYPQH1F-xD#g_2qfAdCH7N35vGfF4~S99y`@MALxk8dp~-L<^wy8F zx7KbNUup>WKJD_aL(hoffS^e#1Pg5lj~4G5OJuC+g>BnJKKucUET%4xoA`Qawl}Cf z`}Zrig+gckmQA*MWcAIt_dtt~Di4R%&^?A`r5PKPb1$+8TX`idh>{TC3})l}1`-$E z!dUr?e6Bm{cuqejHQb8*Od5`>&SUy1%AGgFXQzQm(yfijK3mouw?kfxHO!oovzt6) z+5!R7GBEc0Cp>YiG^E`{%0(;QN|Zhvn^<8*3LRp#RjCUmhpRigp5 z89?3osiiQ==;(zL=wPRLf{K`!{BR9$oy1xSs8S~C3%Zve=yN~0$qE_!xW8!`A2HNp zg~9bw8ip%dvj1H6R1aIGCV6yer5!hF1`hvV>l{2 z0Q7W5k${q}g!Tt|^MRAc3>p}i81hS^!oV)ikB!C<@aZ-tg3&}s7w`$aEVqmm`Ynso zIF-w#Zl9jUw1YKf-z_R63p=cCM$gY!@uY_AMB!^D!c>Svo3&ciNcFxCkwzg7Nh`ses2^%^M&*|2}BrrRlEOHpl}y%r@Y{> zxnAl|Omq%;ctYQCewT3`!du~~+K(iq>2%WXXz)EM;%b*TR|oIeCQT>X8EXXhjt~-y zYjcHZ&MHJ>VMPr>NYYFDp229XClV?;861V(r$hvsYeq5{;;q2o;yv+YI_r3!#y1_< zhr&S&pK?&x40byxx;tlCG~;sezJI0IrOM1S%9&6*%+b-mOtVN2!gV&rz#u z8FVrNH}8j;!xk!AwaA;1U2pRS1lL@{*4k5Pb)H?!8|g9=a^PM3u?urHv^)hD*+z@i z%lFNoxh93`cm-J>1Lh(C&h_iWx58#(M0o9FBq?>2*00-^2Z{U_)h#-Ry3Z}5L}JnQ zkxJ`#rIMGf?cR&iEsO(Q7)!GfaU;CpZ!q=TDlO`0X*R8E`D;b5G(`vViH`Ye1b-zG;%ojm;ZWG9FNvfrE<9O((9f*dp2 zRk%LHZ=cB^M2Hl4v$-a&p2)dbe%}&pweUg%RSg?Edu+FpXW%mn|9=wfSZ@nrk z-W`ISBETpAo#+(|ZvTl)!E-}bmSXFlo@&)=)nT~x!TBip8K*vx=np!|8nAco431)s z^@FG+_$d!P*l^{T70z4V?NNT0oCzM4Fs9{=4+JF){#(3P1&>g=J&NBH@Yptg9an9{ zz=s1{1@20XuuK4sD-fW?*%0ti_s*;(wkOclY>zkjPDQZO*esO7tz3)Be1qJXa3xHE zXF|4vShWc=jW;3Dke;h&!>Jqy>HV;d=97L`je*vl^{U(v{=wK#sb1H}b2kJaxk zb_QY1(WU)hMhbD&*mx5?6wQ>XaX`JEXlPvTGC8kq$Sf~2!>>VlQHjpH#5QK@^YzCGwcW>GUB|{w+Y*C8_$qUIsI3|Md1ziDBlSZ=Dl>c+ea;PW1cQKLlLFN^q2E`H{E*>( z17LmRm_Rgevqw(JnP~nnqWF*?)s~5b1z_r6{wFw+A8nx1wyO+ z?*;+mhK00S(S4P(#=3VXHu88@V2=Ft$&H=X-KBL6l7$T_35{f;eBVW6*x6Vt18J|& z8ELbr!XDpLh9N$ZmXE3B3>iTNqGV|iW z-9iBH=RbbHq;SHilXsruD7n_IZu>lwj#eu83KZc~!ZhHK9Yok3X5wYc zrj(;w(xb$X{pJXBcD`tCP*?Hh1~Tn1Diq7RRn;-8oR_TAdK*xcdxP@$el>r<-OqW8A+Hwi z3-4mQh`n9<$u;EnSzNPN9YdW4@a0pR^!{F^Yx}s`^jX2;E%eg^*FJR3F$?0dM|LV!MUmBD3;$&{%DrT*rW!E3pdie5A!D_ zh~_V1pwJq>fvzS34MteCuaVG?p`W)?uC+t3G*HdxoT#AmyU#E;4AR&(yTYo${5t$S->nWuhm z{(qMTi>541dzR+JF>L4zhER2cx)1?g?`KSPO%o4MUCZnIS=1~L6a}$hCRF-+nMgey z><~E+cXh0mK0d(2trnvbcZ&Inwy-+EU;yeHre?uJQF?Oe8jg3#nZ`B=PL(9i1XFwO zbLCto2P4i=M>(1g&=U=)w8~gkaWpH$BcYeXh`1MIAPQ>>0Wc99BCLw`a4XJ2GiJyZA zSUAZZtpc(dbP0lJG6+A_G;AT=98RtTgPSSUSJ^|0d)B@-vP>K@dh;*Sh+St^Ssr!33{UuoVo{=P6K>s#<-B(a}VSD7^XG=03RX$<$!Mr zh01L&;QBWa(&HtAIG&y(sgKBK)xJx;a5yTaGzQZy)~moU_M^Dr=4_bzxgCXb>6<-I zlA-ZEt`AV-+{jpAk^z?dD=PJ-IrfOqb6;d)!+fN5+<}qKvNSYh_?os08)j#Bs)BqF zXQ5Toput2#GqF1|4jZ{v+pdGN{O*BGioz3Z*N7jn4q(*uS|ZG^B}q 
z$&+l_cPd6dTPXKr5j?t*wLgNV?f(Q4Qb(n;o=WtoO8m_&1&15O=KG!i^#B(45Bh5{ zDTA*2=X;FZ9MYr#85H^JW-_s!Vv5VP|ExRt>!f-43W+Esm5(&2Es?|61J?#909Gwl zmSZ&U;`P^Ho+Ymf$c^AIIurM}+UE|>v3Qc$*KCy$2=FrZ5`^09o%XF9c?LZ?P=L@rB9wZOA1eS@;i0GfogN@D;+5xJMcbq7_CtNl()zqM3u8=1#JqB~K*tSlA?u;*yeL&)jJ}q8BTj!vng80XM`t95?ddGV0YUnh|eekD9vT5 zWUNL-d0zJP-{$FoOW2P65?Cl2PEz}URBO&v7uJ|-+zPg_jyfHtd^+kFU`R}6Lo`cul(am~E#A}cxRKRij$-C0XJ<+tf-e&{@j58QJfy#l7{p_Ktb zNyS)RsjZob?9k;RsOa90EfjVlnY&}IeLCz9$Nra7=&kgbaH4|T(hZ)vxfD~K7khV1 zxGl?`;t{C@nDd}9I_-5yM<1H4FhuR+8_MJ;tTv==PU!k`imtarn)s^h1C^WU6`0tF zexAgyS=bC+!m{{O`Npa0Gg$iE;;+!5$=5%&yO{~Yd*N#Vc;7Q(Sn`QGnV_eZxA|#x z!KrE+?)-v}9vSz@U>&Fq$}s$0MoTEjk;#y3W~_}utgS%V6&1mlYD7Rul4e|l_s@L# zsWbQU;w7cXdJtiHFCs1)+$IK{98|uJw((Q~`uM|;=Vlbuqw$p{&o19 zukxsZ0iE)#1Q^A60*`LZ5&-QzZ(Zf|5NLT$2KDtLKQ?k4*2Jsha86q%^MNA%fvO#P zI;X78S}rTv=cKhF<(k19pKI88KP})pnNPz$j9bb$aI7wT`0F~fgz>evz)bxx zz#2aAh`_5!dmkxQXGWtnlZvI7v;o6Yz!l&=aP;pc@?oS1IoCVZQatPYXf(t%AYn0L z8(3NHuZp_?)Zr%PAb~Qx@kk9ZcB;_=u5(g8W3gsePQW!mgozCuBc&H-nOTd4=pk`g z_*-5S&^YO^FgAaT>a(K0B{Fy<`B=cz3D|%tZ|a~X#ROaU1LGIHAM!UcA>AeZx_|&^ zPI&B%;ySpZ##ZBTn)vMzsYFI(9N00-cJn-}+ay<`t4-!%94bMM#a@*7USr5zmid(0 ziQPvYIY@pGRV~Z6K~2^t*zq7S4&@dCg>~3yZ~T41pupNEBjV5tiMKJ0WHV(#ptY;fThX)+!U^)dYS=`~V01rb8Q=S_ zQ@NO{+JigP8I#X}3+8SXIF>rBdsHSCUgyjAFSoXX{#;9s{5>=aIUi|1*p~%X5w22z z-=FCEO0(b-8OTnBl!ExE$@~d)GJ*3e_VMJ5m(CHE$5hrW_KMj~KTq&u3z*l<5z@Tn zwKH)NefH#{WRW^y^BkF&Uc3skuepEa|DlSOziyT*m=2UlawwBbu4A@@PQ$|kK>Yxf z^!62UA>hntq^SNz>~1CW;Gp%+1pF zPl%P#HiS#V)3rTH*Z%{x6$@tFTSu`Yr8u?L;(HPbw#$fdQu0Mgf|-I+7D6nx_^yhU z9av`jEYdR);$(cm_xYjE$O49nV`TB)TySQ&e;ogqsrMK$VVQ>Wi&~}F0AL^dGtkZV zkMQ{#%YHOHbc@Y}P)NOZQOsq369Xa;W58GBNC&Y%o=mDWk<8- zhTqs`X~#9|)}<08i$WXBh;f*vz@WGxq1WQBc*a?EKzZYwX$oOHr9hDmUweV*ynw#@ zFHK$BDhiIUmtWz|%Dyr@N1Y|^>#kTO){RM@rnCPKiLS}5h>nc) zZ>S84#E>Z@C@Q%;MW{8hmX|#Jtk3SH7yp?StHL>g=<-4t937#O2 z&_yIx(f~gM>Mn1r4UrS?L{|qJ>9H;j6Wx#edwU2d%;D5APKvY@{sh+Ln_hRrauC`q zeMo>PVT5oX$ke(hgF;B2024cjbVjcYdE-A@xoZLI9EAgvcR+gA`0A(=XfU-=BQ1Bp zBU^aBz@}w{B(;;peLP`#KRL0zYS6Gn1lrh0$l5R|oRuh@+a_}HMUq2PxKB_d>#aE> z2g0k%RUFPtJc92dEI;F))EdQD-sLO~(ztIO$QAP<&l4aezGy-rx;=ok6-e?!1if*6iXpsS{taG1Z7oQK=+~6p=US$&^ z(r+Vm=>E#jiPFGclyE}D4lK<~ROGf^&Nfw!X5~W&qs8P?c^u*&#uXu$tr-xULF1H~ z(ShMUC?m?*Er(iwx?pt(gq@??Ykm2513de8LLgN*zwBm>KYKC7?hd$WeR*+TzPBuX z5A_-7y9>%xr?;Dqz`qI1Je8>BgqQz4F1RfIxSw{pp}N7}ZQ7hy{xd znA6)Z`I*N!Ad)&WE(@Vfr+OG-tyon<_*fo}St-ara~z*UtvHtI=Z}jO$LjD> za^ukLqM`&5IN(tF|2Rq+Hya}^5n2&f5aba@1#{TuNbJ^937BRW1lBK>qf|OQf>@*f zd&a{fN>0!o{ok4+QB<^DI=Oj-F{)IpE*fRm%?CcZ&o4tPznNx&+Uu z!-aYQqj^T1!9st)f~|u;)m#+y?5{caQ4@U`&?B2b^#6*GNgBOLU?42H{BbU>*0a zM$pKat;sAZmx{96DuQCBcnN(*g}`Ce1bG43|5YRybn~sAdH9?B2sFe{6~3=--afgk zmbPl~w6=QB3fYX`XVyj0vu!{aKw=ppn5{D6D-7ztJ0>5}s#-=XfS6?xA4P2(g=q*x zGkEWSgnL*0z6%QsL;OuA3&hRms}~B~#k(kIAx`_f!CLnCb?lp`4N;s||7Qm@-BPHj zbS$5LBOQ=<9F+P=bEKZ4GP|y>MceMRqCE-#2xHUTBh3HE5GbI&I>3TNMsUap>BLny zFf(&v8bX{CGj>Ax7)5P4UZY$kuUyVDS^0VcCG^?%?W7_hl!ni+w_)k$F$3UV#OadN zX&L6Mf{7>se*k9$?U|LMzjz{hW}%4ziT_*pahj$*l`=1MKVN$Z0WBs9@f)Lr>jH3a8_RUybDO@t2 z6NC$HOGoi=5);QMx~+oXwULjL$zN3|Ur;N=lR?Lv;zu-CLpdNirOL6CnRq`aBzW8* z%zV^v=jLjAg=L1snnRBRm|@xYDr)RmZa7rjGlo4F+59VIBF<7;=Nd0BpdJP;NHRb` z5Qtv3ty~Y6Wf)9VHxn{Ae#(7K!nFFwTRq}AFm?u~9C^+raf+;EG^I&kB3avv_aoHV zka~%#mXLrSO;!u3Neb#33nKYrKe0HOy0Ui5#r;NdC;D&G;9?q$PR&*>R*Dq?l;yuh zC_Ju4&(VOXK_p`+uD-6ypaR7uLgbWx&SScxn(h5mXT|uv5OEHVuKvwS1t?XOVX`F~ z)5~Rj?|j`Zy)|%)D%)L?5#1Q7>pbyNc}H+{6pZ+9hC$q6gkT3PiunmAG~hn=gtT)v zIh&SKA@mPM)(!FER-gp|fW*VM_GUc>ilV^#X3PBoK&@tInM*Oz3SD>R)R+ zns;907SvlsQfLAw74}pFJ9lW&XoyTf5TpG&b4*dOWN1gXw6JGS%ayDX8dQ%ToXbVjp!pou z#O}KZ71W!xJd|fuDefFJ8V6ie#3*$tkQ1XV^eNxW@%Mm+PZV 
z&ozB)UW}qn8o#WLyjRfMIDKd#7za{kp=+kT)WBk2coGEGD@!#-ZAlY8jK}mxjpeOQ z>{`5Ksr}|ZhO^6WaOZjLlc!pPf$~Ok%6c*_<4NL&1;f8!UEb;&%x!?gPN$>fNtyUD z3?CaVCz~b7h;v5uq;x+Qf8FVidb<|;~^~~821OPr^u>jgBCV1=fU)>+(Oq!q7C4*MCIz`L3Jgg^{h_s z9jv6^#P0Fh$w#Rbu*um?X}f8X>I(e5&ZR@OuIRn@RI_<5%#Z;Vr(R_pLlx27Yd-Cr zAie|EX%Q91TLUwZVm~4su1`D|>-Ip60|gB*E&f`d;nX%J|DXwKd%5ngW_PLJ+0Sz? z!2B%KxG`|i{1Q*R(4QDJZN6^bvj$hol`9G}!X`d)mCCVEY_KRYaow;Gx%}~6)l7Xr zyFp=%mA75J%cq-Kqv3)!6IiwegrX9-YpR4Z4SHlS)~jY>9ZWYuv5|Fd03$MBtrV24 z>VWO+rUth0#|c)C=-aA6JhyIOti+w>M!n%fAmCL_;utKF@Yh%JX9=)r_N8`xb=!4d z8{AM~GKaZ%3v>}{g{A>0T4lh3p)`Kd-y7aZT*7>Cnr<45pu1;QW{7I-h}co@BsyTmo-il%?>V)2j8)QXQ6D{ zGZ4YJfgu`wXs3N%KU>8kMMpbyOUKG|Z(gd6s{lhKD-*5OzgpO*{y8->C9^_>;;ijLGjiNDD4SQAvD`5T;q6r*E5uJ)9g7boD0c6 zFKZvXW<-d=k!E*%>-on|d(bO=!Ael;Ke6pSmU}$AGy;oX5eSqo<_iY%QhH^!`hNrjl}{1lY48h{VKfNzs*3+WccZ(sH_PSu1}`bP?dO{p0^4R%)*;O*>#22FKekc~Q?aF*_8S zL*$oPX~kiBSSiAb?6?*8?WAGxS5riL4ku)=KHeH0*NT>xb9nPmU=T=k2u47)jKa{!dHth&9`;zXbtY$V~&?vNb&x_1^!P zeGmv{SbL9G{>FxvWsZFX>PEA_wouSxg$Ti_=VgY{<37~xqH7IkL-F-UG+c(fFCweQ z)>WlnSY96@xvJy2BgX`a&jeU%My)e^0m#`3tl>tb!onYLtS%C)Dc#nFk`&_->mO=#a) zZ2sM$VK=0|58WmXN->`R)S1|88so=>02Y3~|C)p_8^1=^A{%FJH-fXn$Yj)r&w-o> zLM3P>>3+GUX$Dwc{SpzG9K78S>TZKKX2X0#+tEJI38J)eKyq7I|M?1pzq5kqfepQyr9>@5`O_o+4Lk;v2F;OVB zG_O$&#u9dea5Gg|L{eXN6hN>ks$w@ zLHOd995;27yezU)pAdAJ_~-!d9cOV;U_6st?T*M+FLcrsSJbWNf3o@m$du?Cq}hgX z%T8v%41#B&dHrGRaAqsE@;0Ri0n7Aj*l=5>Jd6`~f~s8yyNCyNnp)Zb3bSQv`&9*U zP4P}c7!sEp=6qt4{WlIlF>jmgOyL}uH-@%I{rTLyUUWWnNSS1h7QovBr`7P#IxaaO zm?Z;>sqJy{UBYSZADtGgYAoSo5k~+f+3@_4Qk?hnVkK4XP1w|6O&2`|OfSUMM{<1X z1s~O69e<*UFYDYP8+M^=n!jAm_*XsyoUS)(bf_mwDYqTwbZ&Qs!z`wp?;QcRa1|NV z_CfvvLcs;}p9ZO!O1bxY(Oe|&+{dHz*K+A2ok8svi;$y{m|lg7A^3$;D<TSTxxk$=0x3C(}Y>_(OBa0!j!t2?Qw7X`=2o=n<@ z6Kd1)#O(0go-BJ4kPGXF1;Nz4C%;h24=r=93J`9Nx@8V;2GjI}!xLCE2ukP5)?1(3WmI$M63T<5QHlg2nlgG^ zQbX;fUtq17WRDIl(|` zkV^%xSvF=Ve%U#duIar)Y3kW~yk7YuFWV2uHUzDUVs(!KKf@02I3bV`r<21-RJL97 z(!3@OZMO9FAnJyhzs?)-XWW7!AWF?cprdDSD`fyEH_uE(KZ{4Tl{Vg+%AFXjnhg5) zM4QoihIT)twnaZCcpa}a6&E(UBnH)-(L?kpK8Cd-PT(XGYVn>ivY`OeUU_Eim4sa* z4LXs0M09K-YvX@_KSP?)w!fsQpLz-8+<410*R4T!@!mp*UA!4;wRzTZ@9upAKKXY&F1>5=)EsZ<(;A$atfUHlgm69K=;ppY8E7lyLJ=EZtaXbtfn zG9uAHci={5`0lA>ItwG$bL&tv-Y)JPxc618{uK{y80>mz*-zaFD#&jM-Z6|2rdCZmI|Y+1=Mo;}EU* ztLH^VItUdYG#4oI3vO0i(AxxhMI~qu6loZ;1EHZtHF39JbX2tW|7Zhp0fDIkQd9<1 zSoF^ofwK@flGkG$po)5j6f|>h?10EEPDhv*-mDidB^;HX6vDB<7Bcw-b7CXHLBN7^ zJiDmdpv=O%KK^=d1*?lagL<0nlw(-~c*MX1Q9qZBaeJ*N-o^Bbvq(`flp*JpyZvv6 zXckeASjDxqjp_?^0$+rj44jRTZ$Vl91^XszABR7k&}KY%FGV5ZX=FQ5Q{DH_A)MNo z%5}eXX31K3x?@CxGG#oL0()yJ5k^Ve#e4g?vGX!nhmL&}5-3XKifXo@FU~KC@)bw- z&!&S;-S6|^PalE@D*oMIzBhEvC9Ldk5^J~tX;BPb@Fzy-Nf#tr2hkjw5HFOETR9Oq zK?ucCxEs%LNuE+~?!tK9*0;;+99q7J&LAql1l-62fgoHs4~h=TljajeCr%N%#&u`5 z#WZ5Rw+8TOagXviSHz{RXMzmxzlFNyIv#Ba2&D|Y-EUz(Yd$EpF;|lBt}uw23+tE)y@OWWx@_@<<~Q*r3Dn#P zRVh^_F8VN}e6HbeE4M0fZ5xJC`?@vQkK;tC)Fd!Bo_m4SX8{UkAt)J~{<7FHf%DY5 zktZ65L0vhxmPq%X7ZGegnXn5j$VY{1$q{0~6J<7}%+5JCb*qJasVRtHcA{v8IoSot zI)+vJbbz&{^fx(2gW<%1Cg8;t{MMSxfKAyL3>XAs3?vxH1<|F5`q~~lhJ22bmZbB6 z-G_YKdC99AV+Ny5W{f@ov;&UHN<*`R}M~9k<4Q}LMGC%st+2q1axHw?# z!GZOHW_0tkBzg?rwKGOb`mp_zJXFMsDJb)OUGb( z)+jcEDr9*Jr=c8tjAi;A!v*gL@jJO(X;(WJSK6dfZzKB^Iv> z)3xAHEP-9!FRoPgC7Pv|vXj(_n1XX4$l!+iiD;FGafu!e^f$9abbM8gxJiaoGn4+Un3=OiA78zbhsv3O%>MLfv zKol52QGnbQ;gSB+l9aKvb*p%o7QvsSLITq_7$bv6t9Pk12320C#-lnr>fWf+49XH) zoo<(;FedMvvwJ6&ydFnzbu%6hjZXxZFM zsRRro?@!Uaox;cK`YDT^`Bh*;A80md7r&`qgCfp@Al^&PQ?#UT;I(h|o1C>>Nw@iX z-)ALdvYWwXh|P`(m1=>295$Wk(9ockN2_aSq_`>!syz~uHc{BQ~<1?tAN zR0@4AR>@sl)^)bvRwr%sj4$h@>UZVsHLH?+Xhk){6o6clTR}su+UPU3(XM0kZ!(x{~kESn_D%&6iXqh 
z4653p{EUd--!96N%PCD%m~&E}+5X){X0|-PR;JWBc$eymei?&(td7!;F)C5{XM)L8#DO#!Xopmlze`@KO%pdONq4_vs3C)j&^*>J zF-wK(q+DJvKYf*u7*n^hnh$v8RFt|fRyDPl+jQX%Kq5Bu|9hod4?{} zK+X~5V9?2w5VND@c>TxicpPjRr-@e_2##~^^f5H>;Q99KOQ{G|tBV>2-^wXKT9Uqb zA)xEb!CV-*FVYlOdp4*0g4-v)c=5*G4HZ*%^`A4M%(QwSV67mtNNy)BuNz)=_8@c+ z9s|x8J7sD)LJqZR;)SRLk+oabJ)<%V8oq!UnmaRH3(05zaouiSEXdEeyzg%1-0e?0 zwQ;Vh!yg0;fkZ=Mh~#5!4I@tXCz@_mOd|F)n^C=RneoT$H6A$$VqZW*q{mS7T6^GNlZBqg24 z=(%{s@}ZE2WqZWMAx&dW*x=|6Q#>Vsn8z(O{tDmVT_b*N3Mga&?( z+_u=_b_;Rv?jABrMK{Q4-rH}k3Zzn0aER|oK9b1d_<()C<*;Uo^l=~Ni|~`Gh-yTE zj_{Lzm0Ar;-AP?i`Z{p#*l?)_rNiV|Pf~SDJOay6HgvPCb-s^b6Jl!C(60v*&RCUhJjF0S4LOK0MaavEdG)Tj^;jBdn;dm{}RTs zfIc=J_y5ZOWeS1baq|4sEm5jMx=-Fzmo^k(ct-Lp@`4p%I+`hr119K7O+`k-hH6Qc z%*)C9H9!uAP#+kybhr&%a0Q+ywq4;cg1|PzQ?VRLj=VLt zC>?g_I)XXXYuciE%`)$k7bv^un{0o?lenWe zB+casH-rDmiqHGwH~qxA5ZGt}Oy!lG6TdpF;707XWe3M5Z4oyc&zySQ>`S_LNqd70 z>t&X$PE?KJWFoF#TOqCU6bk{vG%#W*0})H{VubMsc7W|Vj^_o!Gw9Y~uJ?yf{)2?9 zaXGT@R)*kFGw^wuvk&YL6X={o>}2{j3T0(LzdBG;&P}7=!qZ!+(_?Yd`%^Js z1dduPEQ;RT%J)5!90U9}5BS|rJOL!UPc55E91_#Hdzp-O1E1XdFg&v0BTZfx@)jxJ z4R~S50QL{?6&3`~iiCV-W-YT*hli^{7tDu%*JRs-vz1LEUVw+m0`AkGb0>#6q=Z9v}Kl;To3`sUbb zGl@$Y56DWUxdO7s%%cO|{oIyZup*Y|nci7$l5E6PoOEtclEV(`esHQdt+fWSe!N24%_K1wh<$9b30Xw-$@VnxVK$0dtouQgnh9Q4QIuO+Bb*%^p^u0JpIUE7VWVuKD z>Deqa6CZ^Ps!AnT9}ZFo+gIBkIKMu)EkKSwC~>l(x18laIRDvrJ>|(WdzX87A&8OUhhusA?BLsoHh`bMsz0naeD+9IMI%LLnomGUcpMtvjD zdv^Cs3Hd9WVoIM+j^U*}pqkGQ%idjb^~)O7 z6CwV^X@Cf3@N%m~jV5l?d(fX$W~@yctj9?QK2@q#;d;n;lpzs+WA{0*HemrP+LFSb(8}w;;pMt;fU^i+A@-+ikM*oMQ!S?5>*>I-Rz@(tcZ{oV=R~@o>)k4@#ev|Dq=I8;5Aa#& zXQoxQox`@iNa{8O9YR4@tA>!EV)%B|^Z#T03l{RXMFmNnIR&D}y-QuFho6GIdD@rX z^nQOXJTeX{W0ODBTsYHY88-GSAm*!k8!_}OU>R0I;9}T04C~O4bNZR9-t>jEF^9h? z6bM>{csYL!qB20FM5MihE@Jr3zXN;{{zTl4svbf*y_?vx@_qHd6ukjfSSe2Wj&i++ zv|K<=XyFWt|H@_t&bUfm9}$EIoI_v2kuoW$q2Z9E=Piwv9By6O76q{v&;~y#g`mfV zpjNf0#QE$aR-WQ&W3|6xU38_HPb8|jey{iRxN}$F`m^Aa=5<+a7`} z$k_ulg(h`6Lr4h1e*lPRfcADY)MRy!xj=Y~FCl(DLm+9Y&}?z@)r;6-^TvMsPzbn* zmZCDCdrxTvRW*3xk%@tc>?Nv}j^_sp<<}D_TAd>u{MU$p5Gyui4L}k%i`X`614qmm zK!E?o-_H@QDlT5E{`gz5s9|Zx3#ST%wfvqM?20?8e~yv4AK*k`wRRHE7!K~L&x zL>#yF$N{jNl>9XM@fJluy%;|Yyla&Q0(0P6Wlcgfbk@C2q*_ExNnO4h3c>rIkl z7oe6qk)Z{tZC>{?K8l;29U$$+4jhL0Zh+rbRg$1S#Wt&Qcrmbx^`zBqsic*RfndH1 zD5543Do`4tQS`whW6z|n_?_H8u{7-07feZ5VPp(6MDZtK!&-hZL8SPxvB5H{)6wCk z0pD)b=&`XGK1uc~JfFoVuD*YrFvLLy-H0`cz3>kcrll{SD57$@qpk;(Q4WT)+`rey z&IZcbFCBaD#HanzXsOL1DTtS%LAeANszM-r_hX00_jk~RLX(GgNFcwnznw`5KUILS7%O#8C2A*8bJ4o6&!v&wCNF3s|-#MdW_*q zUYQ0Z4p1q66YQeQl^Xo0pf7+Ls6Wc%CaogoP&%HVW;M6Q{nTpU`}YMGgDY|_gKK^< zEpF5`eWZrBxU60-oCpU)3b(8;g`4zPS1VT*0HN{Q6^lHhAE=JU*A{OF1ru*j9_g@x z37>WIq_2ZGPCho~DI9<4sR&r7eK>yt1f%UOL5vF3KuGI9q?Cm8d_HnHh6APVeZDum z8rTYifhgV}kbr?6dH{HSXX8uO-+MZf7Ou#Pw8T2j{lR9Im2S7;S2wLhXZn-ihaJg* zmIGSVIk!j9Jx%MEj@d&MeXJnDu=c60qErIz0Qjjczuu2!WpfIPe90hTq;zQh;`MJ_ zEY~u`JMSs?qzXH;5XLA<2c#e_Z?Zuul?3S4f{t&7S$PJ3opDbJFqIKsnZ^ zaQSQqVVJ2)HUR`Ec2*1JaSw7B1h+s?EJ&kCowub{#gX^r@}&~rR`inb1>vl2;JhU1 z>24A&^C}ms53C7EBTm*zpLZrmg^-4)b%v$Sr1l=%8iv!hoQ3;f08a62B~YT&ghmEZ zbc80yIeQQKL$`9d<0&u19JAO>R&aB6xy+PQguFPH00wMWX>VzNMbmE=u;svbfHKp@ z(^XVK>2g0N5%)@FkL$k!_~}fEo7*0!eH&?RGGmZ*^ei3sO-NrUfxDNW38ekuMto~RKnLhl90l>zFU4 zZ~y#D8q}^I8xDxytQ~apei1_#6*IOIIZFfi1SKAjp4mHK{}PE5HYDDvcTsLswbx%f zs^y$X_1j1INw(g&O``DLrq6VmwpjEAT6d5T@Xix_k=nE0JDlxoNkd`DkVGtvhWnN* zu2lO(jxv^=VEw7XfIQ%@CS$Q+I~^BG(Quz%CL!JJ@v`3kat}g+k#KN#;jz5c`RoLV z!UpfnA-BH`HQDtpNz{7o}=XZYyAT3P4^acvpp_s-V?kJY|rSKegP2L}r#!hHYgKDE=xe#&krD z)nwq%CI;mne6TW9Q?Xm_)~av$+zbO5=Q&#cKs||As5SW=a)#?1h1)PeDUHyMs6d(Bx?G#;cyhUoFoU#hy0~@@18TPWK^IiZD|flF 
zr&$TAc5FV7LM8hFSA|=f3jh&D`!ecd{F&$O+50MeE1Fy6ZfJ34X}fd0`$h5HacUKJ z=)Mf0JCxgJ^|4i2n1>5hzS$^(Nb5<~Fk)QEia!BJ2?Uk8Ux;^4| z2IZEf=jnxXGpQiPiyNWWE8i)|X&eO(06Zr9g+Mexf=Yk=oD(8)hZoKYJimNAd@R`{ zQlK?g?^MSKQ(+xeL5~y!_073Sp4te1b2S1h8$-zvLc6=*{(9>}p^AD>=EWl!;=q6+ zhWCNTRV!8ZFfNpWn0Pyu!>k=O_RG>gMfbsKCeH8p5Lw#I|57z2iNhR=p_5phwx7j~ zBjGf=&4Oh>?z^2)l1~8f*{K}}4pMVecgYP2Iuq*(|i}QrVN%DIabrD$Q1X@~zP@fDfz`8TS-;ADl`GEP_*ruIc zM`8ye`m#Zi9oX}g2`UYVJh3{jfXpi)sC~2RSi;wY$fC6Q!O|BOshUYeIDT#hY&crt z)sRS?S{V8qQjOwI9w_jJ_3~^_=TK#{sTefhm#X=M6c=cZqy18C6UWUCFLi@{kN!wWrrhn{nLz`;~CDj-zuw?^dj{bo}kgIeK9C zXR_Y-e`Cze=7LTaX#2ReRY}WcyfFm=TO7=e25KP%7xAZ_q)$%_=*VP@gGC;BGp=#8 zQd3w)e%;yx?PP&QdRfpr@7n__t74o zaLhw<%w9gMQ-N^;b%mapqbnDa{-L; zP#$B8nifOI=FaCfuaz*2&{K(Dv^1)wGpf2hC(2_(kTA*SEn$9gq|iaan<3h`XH- zB(hZe2BbdVhV}s{-Rx(M@4&T%+i`p8{zL9hMg9;wYZ9|V?-^+=yDia3_mK$@;XG(# ziZEc_Oqz$Jy-3B80S|n0)3BQB4gNa8TPWEjkwlP=F^lcc{zyEa@An7r6i{WPEQrcp zsBVsU)#m7M6?r2-O#2XL7+&ZFYEI4v!pOzaLj(RC@I4Qz9Q*jgY@Kek-EgGsBcJ-UOMR*e? zCew74$wN*<9+}H_Ii2?dTfCn@*U^IywP!0|*u}iqC=;>;U{Hq&W~=($d+kr*E&&Ra zl|CB^7P7+ESf;*cDUm~6`W6OZ=&rRWv}sqyXKb2vub-m?0jM3@-3_=PSJ6b$uq<3I zHKj$-V~eB~W+t>U2(f~&c=TWKFNU=RRlR3+9YxPaFRP%_k0t(&d~0#jwWoz3+-c;6 ziw1X>Vga$JBRpQFM?JG|^t9A=eMR#&FvaigpXdo&`b77SWhlgM-v#g>O8 z%t%EkELkUM?AR5;rJ&f1N1gLy()zs=;GDPSBhj)Z`#c<_ld2{mm@qe&5ZaE0n8~mi zBS)?+Jt3X^slTGPSbad6Z*^9KykJYCo5rGE`RzPCH@dHt+796D7LFHcBQBP($QuMU zs)iJ&9Bn}LsW7ocli!|OgCwk+vm|E-ty@xU(d)ZAkbc`|JFZ@PJPK(w4R^twdaxpw zRoWwN=S3V<<}s#;Ts!7{}I=(E<<0!*0Nr5yfP@Cm5YK=Zb{4uG~b5x9Cv?UsxsAeO-$Lh^D4SgS+=M z$fqS&16(9jq?`L*OlCg5IRfj{cR7LDjg(Y1Hz2)R5E2g&oqeW0EN)1OVxf=l*Eg-b zrw4rv3dfdJ=YLTgY7xP@9$H<&_E`NRUwCFuEQbJk@i`Rev(vrUySh;Jq{##QC7a1} z%-kY{YTC!?C}|M62mD~KsWaU4Ko`}4RLnLn(-~~q*vRM*tH|`iuxVm@i`Mck=TU%; zCd%wb?J6FJx}ZaQ38Rlp4tv06l1MRf8kY{RvPvKiH<6!9^Nhb8v}p)8y8)O+e#f(a zDZoW5U;cND7f|>go(a4wd8hxD9b`nZkTQi&XcF-ND?rr09|&QNyPqW!J{XSm*s0Hw z8IN6J0;0EEW<1(l=DcVQXynh_du>`rLgi-mc*6y}gsStVg8NRqBHvxLFf%?BW37qU z=WtmG^~eqYQ8Ko93v#^gdSE-%goJ?%D$F4)&CD4Qm2*+v&;K2<_UOa|(k_>vE3LXE zwMHW1%PUKI@nFG6eP7FH_QD4L*i)%6SOcDS7LtcBjDkaN3Pr~fbynEmwE{Bm2kTLZ zCUFRZ6I6?4#Jm-#SrO5#pYsqJ&0udI?*&3Rgn@7(XU4)tV5jXbWO%7%(#^t{93k7Q z#_MX+6BqZj3rMzI(k9(Tp7b_RNk7pyZ4A>to13cF;eLsaN+h%6_~w6tXfCPUvz`SN{4{HysMlf8+#7)!PpL6a4j4R?@W#r4! 
zXTCzxLjra39fxpkQMSz&o&xMz?5q+Dg z$s%Q_Jjd&EOCy5}eZ(-Eb{SwaV~%|at)vO%XJH^Yh0O88KLrn#IsG-43m+Udl?KJv z@i{kG8DLGJFzx1b8dDAQNqHGu*7tfye$B8I`~T{Q3dZ$4d~oGI=m%!H_5mT)aS30< zK0S>4*(uE`Z%YnccvKiPp*Gq#8u8vEJ+lcb8QJhWg|)#}{3C;mpn1J^UqR}<(lv+y z_LWtK46X`9HRFHbb`n<+TXmRe{LWda`Vd-}i`8X8O-_m+sri{HPT5rO-;9V21G}v= zc)6B!mw;LX^ahM6R6$VeKPVihAWSgwiWHye9~3$kmxZqQv2P%7U$wC>lR4yglVSLi znlmdw9zQDZ)VmVb1gK`UFakn^NWO?@$0oV@0`v(eAYh|;pKyC_jmLz9R*ssNRUODJ zHogq5X){ICdL8Y6qPA&vJ)87tf`;soYO$_4umz@8|vT~Ws7@ zVaErN715W$>@UA^zc^8nw(A(-5H*BF$P%%#^@%uz&J>kLcGG=qu3~+-!(=OE%*WlA zU;u1G%q+}6k7z95G{>+;sa?JDyck+EJAtLqWtl!)+BYs&V(*pGW*>9LNU)T~J%OC` z(zII`UA}>y9a^*l@(S=PRgsK?p6)JJkHwtSV9bX7H zL2)#@SxJlCs2c3?6?VZ!HJ6>t)(yy-~T=k#^O^l0Is3geAG zHoA~K&}Y)ce*qKybi1wu>sXtN&_HN7F!z#WFuf4VN1^lN2Kv%&L}jwmC4o!NLM2M*qyafTJnlj;fW{qPITeJ^pb1~A z0=1iTGwCc+iv>kr(%VCvT>sbUsccMCVofkUMpF`X(iQ%cd zzECPKw_2>1F8^Ba#tK{--lZ5Zr;i`SIH7}ML_Gi%>$FE$s1yH1VLA#~gMLxRqApE0 zf>g|Ae6O&Y*|{UwdD6lgmc~c~5)&bhxy{(&DlLh;pe%^hOcf5AuNjfY#{z(>avQbY z3KnAYuMfBD6O>cNvMx}9JljJ8GL9C>-~ z5b}3|NDx{H45nCYKK#!Nx|i7m2B*-`wpA>P%A*0UeMAT%b?iWw$$#r)k3)20BY}nZ z4LV&`H0D`B7qBac42)a$>@$>pZ&dS&ibBOQ{Q(9J6;B64TE4`E-*sqp2+avd+}Jag z80=s4Zfie{KFkfkG2YLcr)c5ZIR^5z=(>gKIJ~jo0sAm;t^*+3Xjll1Xw#7}gx3kQ zhh8q>I(N}k$rPY!2W>_mL`Lplr;p5u@&jWMYt^2F8ww&O*~Gx?aF;#F#b~izsc>-f zvNJY$QNy$zJi3bX8Hs9L@^uuOwi6Lq^9(Vb|27oMP9zd^2L-X9qpG-}<-ptqd@6`u zScG3+g<4RDEEG**zMN>4ZsXh3A2;K}t@CEUGsfxPMZBa1HR=Ryj|!9T)$XDdh`#%j zEVgeyg0Y+IpF@3+1z-C?!;O#pXo|s3EK4(n9zBK^d!fk4kMqNJp&(eKM|%+0_CK}f zJ=G;85k4vZIY~g02MGebP${E)xyK74!DNsWT#+<$3-!wnOO1iiK}0$KJ2u8*D$P)4 zVi7dNaX)MVf0(2R4ydTf;81J_n~GVX`k75sh{hy!Mc1Q%RFY67q4czNw-Jf3kQU(h zEs40hnzIZP$_pDZ17LRN&toe1z@lpgxuynpn@#XOB{|z!UPQ=hmM7Ihn5lVHsda0n z)k^+KpZ-$r zbkQF{^Pvy4qFV8wH*^TK7 zZAmV&*;bY#uMsb9VsAWBp0nY7+N%pBACI}jRR}nO0|^F9mAPd9;$4}00~*`-vNJIg z#~ZrT@kiIwzuoit$^QD9&33SpgwVgf$q#;BS#xD#9Qm1+W%fv`1N?Q0drm< zD#lv_tUnNmrhrws{fq?&wTLFSn<_%8QW;GsJJRQU!md2?Hd@=LkiRq2F0pEj#R89w zhk%kIm@9LVw0XGZx;@_15GA7vyQSbbEF?s>_e{YLfRJuYZ9nEoxW=Q!J;oI2z~%K{ z&fyEpDBFm4TgZdiFGsdgHKqn0I-{W>rZ0XQh7r`5CJLcH~>P*I%F--f13 zMiXl1b`T)fA*yPjz!*c8$1S*iFv7y13$#OAfZ8!(gT`0fyf{>DCubHUi?JaoNt$R) zuaJpyRKyn#4lqJNmr_`F1*P>eP13>xSpKN6u2t}tzH#n7&06>oaHIgvZ3fZ8DYq`4 zwv0YVu;3~!`n=|9&m_u~`q0_0`aWp{_`P>gWc%t3haC@+ajOu z##8;pMP$8~{Hv+Bt@TTW(h$94N#1ql*gQ64VizsvVHnO30iE|h`H8JIpAS>2 z(49xT2}-A%XQ-s!E9JyIxtJI%d}6f3^1;fZ>22FBIS|q2BICH|i=|N^ww#E4-I$tL z4+(E61fi(M0i$2zI=KQqR90_J&S$kRC^+P8X9O(sa%}*hQ8jMgF7@{8DIW0g1|PG# zfs%NUuZmQ5jM={1^NE}E*N%Q&tZP*IG6ej<#!I3uyz%s=4je!wH9nHI!UU zTi4Mm{cx~n#$3kO$T^?e`L!MRo6gvG0R!yX3-grfJ6tXA<~&t%2o~b^2-4vjk&kEO zKU*+&CqrtGY6u$ND2UpsFl0eny2Kx*B{P+GeP;%~Q@#v(++Tc}aExyA)N{UR%1AAU z_%{*vt3(XWCa(Gz4GcZw(AtLe_@aBnJZYrJ-hl*s0rW%yEX8nYfPWs$@<|M*pPW6h zV{a6m^&Rb*kKtHs0rz;$gTvB_ou5(p*AO)2o1sNZ70hA7l56Bznmih&Vy3OMRTdIt zyziS^8l672x~VoPpRAYGvTYx=>o0SGwE6gCA~b1z1PsL<#*==e`ehANUXMh7e+;1S z_WFqCW<_HvTisnwp){(}V0Z(e}!lXyf@_#KrIi+zW6-^cGnLw9n zAPaJoMz%=3tHsDS6zI24oHi%oO;h4k@wh)%uLrNQnqhjR@rHu-T7GyQfrv0R$-8qc zQj@p;S{I8oPCmA0PC9H(2=cy0I(0F|LL!r(tKm{<8m&CAf@eIczDt z*8q76v>sWaM_u>gf4|QpS1=hA>^{0AmhE zs|{7QWXKXBRFg|zfp?i0VKz`*Nj(*D?k? 
z6#Bey=ZGr8jzmBrRBM2H%-02KMiGPY#+N7Kk0wxh^D*pHNq#hN8w;iV2PcAXo<2KF zb6}r?;=nNe5~*(b^v zP)KG{uHklMDE2@|XRMSX z>520;{Wt5FKJv$u67RJ@hH*l+IWC*e{(<)350&9Y&C3?3dy>-bxSF%N)vYX!{2%>n zx>e4nAcD7_z=%P(G#o%~jXuW8w=9Eo^K!ZGdR@2K2`bKhTPWjkJME5tJ3?1tQOWRF zNHH`1oO6@WNmUPRPUbtkomG<_V2pv>aygkBq|2aowLO~%CUb^JByI2NW6KFd`uVin z4NN5T#U(L+l4Sd%3OQJ?TryV|tO;obfF}jxmNu?Nr^F^Vip0Y^E7}#l2ZuQLj77*N z9!?{HvRc*qpuiSvgF@}H+$A0&cJ@#GGNiOZ^{mftRnL81Nd~h}ZdC$^5vFq>=`?QQ z=P$@9XM^Vs4djge_ZnD^f*?c{)&q|KY4luQyiuLjQ(a2S@E+u~b*Sb;1B$t5(;vz_ zKKjh(a5&{3NbC>@=|H@014%?ackulLa%NB?M&Qm?`k4K_)vRgcYj^n?nnm@s7BCL4 z*q=Ztk??0Kt2)wzHqfPL!ePsRV#k%8VLR)oe-^Ds0}UPHe;Tp7k)hkmGOdHQ7y-b7 z(+{QHNDilK>M_Ry2E98<)cD}cE4H+3dscdpt-Gp5!KPw$aAN>;{h!tPeN1{eGIxJ<%+`4N_#p%OZrEgy6Q#j&C zT!5gbqQ<*Qpi&^->S-ruT2JT`R`UA%kK9?{S-Of+WDO$WL1Zyg)DLtw!s$Yl zy=`E$$3b=?eq--TKzuE*`i?F`TbUhbT{q##>bZ+#@BBs$y}!eZgod~CzRair%n;lk zryujSzoA1Rbr=k=12~CKhb!wc&~Nu=NS|3p4YC>hja(T%xeP>K}5e zxbIK7CJE4GhAeIS$iPCE*RAsF1;hh0!Ieo2ZEky4TzO59)jmuyFi81($lJfZl`P>} z?2`~t_uIneoTiZ6rJIkm<1e8ytq}1DugVNRUJ>QkcTIyAE( zJ)`4>0jH*&-dErv9ghwnc!lJLtYD83JZ8Qz+Ze)jRrGPbd@uhJgp!4BE`ZR$;|Sm- zV`2z42gX)y2XcV!H+9w7c{3mNR!4!P?aDRsOm~~Z{Z}fN*DmtgU~nv{2`q_-L(bq6rzM;`+6k)2Xu~{S zEmL|_xftoQw4f!oeT>r{`wiI;$f8o*^pB58x59?7=)!%`QE~MKZFr?Pjsg*zcyNA# zc|EO1l$S2bmlls&xkz+M8_7nFAcI7I z5{cHzD3PyJO(A6hEUcKh;~RnxqZ%%08bE?-V8XE z^d=y|zM*r(nwzdr&#~n_nL6l!dhlwLnSz-G@);x#09ere>zFX7)YKzLY$FP{#{Bjp z@5Uf8!4R#xt9WWB(ie23C;3%vYK&e@!D|f4gV1oB*cm)-37ixDZEg789SH#xsv61a z2%P=dX}@PxT%4SYy{vYou@-&K&+iK*JZpQ{nIWZWq@-5+pAz0gm4>Obf?0w01r0~= zyXBCT*RX!*1p%3ot3CsY+kxcUqD&}S;KNSg6Cx>l`Z}$?L z3+GK(tmUYg5epYrU2!*$*P5OWj2>fTr5I9XaImHK{Fk#YUvn}#phH+3yVrs-xArf_Q|mJ`YwIiXyOahbYQFTf@NO2#3*S&X<-Yz<{d_9 z)nrl~M_W>6(jD$uywmXw?o)8BMSS7XnD~^}nnTc`N^9vUGCq8YV&do$UvYs8TJ`qC z{7mfW8r^@WPKJ}TnU@1IZ?TiSxf9!DxNk3e;LI#`2^SW`Ne+@R$$wb|^}r>Bi-fE& z!2RI%ReZWa`eF-90ctI| zx0(dha4Pu#kTPp5j5iUw1ZQgc?W5@?^o?}-iL1K#U;-I7E98Q2V7y`!^bdU2!e=Y4 zs%7jh9zk>4TJ9l0t%R-asPd{f!KW6?GT=;*sjUIJ&epzyKBSH-DN|cKk)CCYjxO9w z-yat7F~^%uT=f1v{)VpG0SG)xBwT5Og{*rs1^7`jhql86Pim|rnkZ(t#hQnT-z}xu zVg6=q!+@sy(sM-y=}#Vp05ofABi3v-^}ruIx&3G%n`8Hx(RB`~_rl1_9jgs{x?Wqk zhQgm)8aYzT1+b2-iRTX5!g=n!VJ-9`$S=hf^LVrQIp12Y1ulV|?pYlkqwU(vq{(>R z7R02HUJ*GMT-?MY5xqS?yd5!kP(>q)%~1heh`uOiIFAK1^7`wZUtstUB8$2IW$k9+ z*Cou-RGYydv&HpTli2|2@Wd4{=2DF0Eh2D;ZN&6)us`j6J0t80Dweoh!J#Wj9jK9` z=_Eb3c_`bC;9dV-Bz3qoupEqLx^G#u3Fp{qps=9x4=wlJ*Cv_%dQA*773wlbzXN{% zQTb*XTr*=r4Y09a8E~(QDcu&K-9~y(v<@a?)qh(H=_A&?aUEWqldYEyGj@0EJI4i# z!o*B|5y%+{Pbp#8e}Mo^IGHHk@t{e+;^w~@Al)D_YzvVG(%Cje>vT=$7)n7og6q^V zd_!4{%;Ut_g(+dc4^#+KfV$RY9_jScNhKM~HZd9SO9Vv>fU~NILLFcnA&yZ9_A@sg zA$$JSMoZ;w^CmTN<~4b|&w92+q6v~HEPWT_@k#TKPzlh%!$O|FVgC064@55Eo_8?? 
zAgz6z<-7ZDjT%+Jd$i5mJv%uUN4oCBNZD96v*6*m(h%@$6cQ+3HWL}(Cy_h#&C7Jk z$GQYapqzpwn)e@l>&CY2v8!CMPTYQEbJT*_p*(AmQA1dE3bJf@!hqTE6<4X zWSDZEf(GRncy4g%Dtk}DRe8Rg#Ux!gnU7ze<`<_V5Y!Tr7yN)ayulTu68+u7#WQ== z)#gJXY=sfPOX~)NX&4ovwx@PF+wasaw?#sK#LLou(#g1>T}@k+H?GZ8JQlk^0daoD z5U8&D5SHS@4*oXO2adQVQSY7%27n9PQGSiRhHAD96?yTH&|QjResMHiQ-E9IyO z>=otrQq8uK9It}p9mZj$zF5k%vD<}}P>_&qK`2cYw2#@qVbjX8AAQQpN8&nqaAN08 zeOw{q+slCy6%l&Lo;%0Phm^fOU)wyFAWzDJ8SQdy<*@vGauK-@$qiTSl(8bjR4w1 z_3dFwnQ2C9tyWdySgtwH$RaSs8!h_B#*4;nV^D%II<;n`pbl9N=1YBpnEFEJo?Zj_r*7L*{zr3>q35Zl5GlvVTHG%QA+WsA{45B{0wn1n;8UF1Fn0; zqLkl+h0!qtc;Be1i+5$P6~N>^LNrX$y2}>TWztr;aWHyKTo-j)5$W&V4Bp}B*TZ-P zTuw-B+S!+3${r)6Gb;Z2vRYg+!{f4sO7=llKs0ey`zG+{3 zWxwUGC}+|&k5zPG-ZNCp`=~D$To0nED;UErU+kPT3H9`uU>zZ4P-CqOEew(I zRiYhMWr%{M-5M1|e}{*-U5Za=v5>>G&~f-YvPHet4g!sUz#KxHnxo7zdujDUw7w_M z5F;6}HmAQnQ#hrKRSrKQo7Z8sYEtT=^+n3bFp16O5?vW2-OOPLecsk1YU4|D>Cwrj z@{Q!`B?109usHxeVH&+-)#?rw`c3BcW?;Qb!AYeN&V{+1hW^M^rc-QsfV(5rzp+Q8 zq(N}Xw|!&}dxyj0m1qFK4ROGQlI9%VEpfR_5wp}FbEnYy*_#$kLq?y5*=82b`Uw!N(m7$A)t0I1bwZc~v?b)Q%i4A5UT9myN|bWKIZxj4{VgNHnCbqY}W zcrg&wg6icbRxZV4Jy~A*Dcch0{@rHoXaaCoVIgXS>o{XIVxn1sti7EArz?9l3Io!5 z_e?}(^~i`CY9{J(jI%EzN>y7Tr}+dxK!Tcso~oDXNe!z0y6u zB6Ns7n(Q)Qr>21A>71mbI`Ztgh3&yn5uZV;h7A`tY7;vuFiNzVjN^&W8W9W8e_BU3`e8wW!EUXqh+2>VzA@BR*!hcXOOvTF z?*+2A7BX4D;vzjY##+cHrDp3{@Ec7D~b7)tO6~q6toc{v|U3tM){#%`!A6zaI`w*wD zy}ShOEioInda+@^n9GORr2NIfMa5U#AXjaV$%(?1ruJJJP?}EY%{gQYW}=E~wRr5j z>Yn^SCiT}j)Atxf%gQw@^ZG)*sNdd1t2p98=Xn_>ZNPZfkQ%x!f-ArMaWHrC05MRY+ct#x7I*D8XmFyBmvAU1vlE|0PrUG32hX%ymM{bdDP^{J{nIx*< zZcOAR3hdsLJO|@iaQ?vpNOabdb^7VJ$PyIBLZbMUw#MgeUC_x=-)jDoOS&*0po4J0 z-Zq|MIuxHbbwA{;rSEqRPUZ+Gh~V1H`EGqeDHbgxC}Hx3*N3ikC1!V)?Ki+~iR!@9 zM%w0#Y`}~3tze*EY4V-z52}@Ji~g8xxSfczVY^}VUZ8}1ZUsh8^_Hr8%LC(8p3YB* zm<7%vxm$o2T%gLTIH!rp!Lj-7=ZC^=P>nyMH82^Vlpr-|!9#D!QlG*nzgSAjy~JBd zj+B#7c3J(jr@~R!|CbG(6(Q^q*W|lQ?*tN7lk`vXIRYHWit-<$lt0jR z10<%sq`6MFWPWnVa7-hnXH>B$iKW4mab{ zfN_+SB||ngTM!N$A!@$-<$4+AjB3YFa-R*8CYWx_*^stVpi6 zQYcbRFH}MtUsVX;1dasHkP(J@+bmA8kBkj-3!-%2fT>Ie+ku7H0zc3CW$nVf^CaAW z30Lj860!;$+p*fYV1yw6_DOgqVEo~=i!F99OpPtEnXQz>K?jDi*cfHY_;0HPqJiZ4 z)#NJtcnJ+X3@7~$`ss?;fG2c1=D2+F9aPMc?GFg_x3)LVw`Y`(l+5;a7UPPWW)#c= zN;YADw$1B#MRbgs^Dh-I9mH*Fv^?)5PDp5s(2LTLR};Z%a#=-}k8T!jS%H^U9EHb} zmi=^sVzoBbmIfiensA1{wAXf+U^ul7VIzWLl3G*;5V9qaz%z9a7FnsT)m>otH{muh zqtk0?(>73)1}@B1_^VyUS&SNU5!*a4>jTb?Au&DL1!HrOhv;MzWlTw5IoFK^{Mt81Ktpryo|NvUsA{DPM16|;C#L{)@AS+~GXXK(q-#(b?&^Q5IHIA&*O zU|9R|%<6PUGxcspZgZe(u1i#DP)1e{3qa;fg5^7nOz{jL3X)`Z-rQbB92oOo!plnM zJ3(vj!xSxz#mg9$1dJcjLv&khx`;FhQT|oFrx6O?+%nedit%am^B1kpX<&AvjW}$0 zwZ^|t(uQU;8vFAY#f{#aVU-0+`WIQTnekKGxg4Fp7IVtx_;+?cS%al#3^=&*xHKpa)s#%_2cJJNAPPo^&lf(^_dt+ zJUfgPAn4%G3O_emu6WDV=E3$)^=(w74~<%`*!j`F*DL>j_IpT_{~+^_0z9S%Z8z> zzY8w=>qxa8d?O+S7T>ZSysl4YdQ?Z(+Q*#@2JKJPuETTi*YpO@XzVACTYG1HEjYd> z^jV8gV11LD+4Uz$j`LVGrRrnwi4h1)SbOTQsSgxvJZF8Et4{@UHKE3{J!NS2Fdyy1 z)!#~O5q!;!@EPL`WmbvznRyD`sJWI>izw0WxS?Mug|YV_@>(fqB#HyEMSRNWAP83p z1neJz`m|Aligd$kX{>iFFjTQ$anTHtR-NAl&pKi0r0d{{1(_o0LU)rxLydED@7^uHv`>HkwXkiB z7k*k+P5I{!Mfip`La|0vvU_%E>xKxP(a07mIAOv`f*bdi%&uf0I0D z#P*&qhs`X($ph;vF;*)JN3y{amsphWPsE_3Ote8rV>j&C*JUsRd0Z1x`9JNd;!6g< zUO5}G*$&i9pWig3Df9%Xn*w$FCp3^PuebUz&zCilbvm0HPUSf63k3rcQ*ZFz1L5sh1{8@FRR9ed>Cu8a#``|rWN zUPxJD10!s(3_lx?ZaeUwxvNZWwbfK}K0V4JcHSbF=rP3Fzu>K{q|}$T7r@Rle`B+a z0GjjFY-s&@TZq(_LNiMD2m=pKHQ^}6MFA|1-gm}uk^@l#S)fF8a%j1c0t7#EWH<r{aHDjg;T@y^Qu`CIAPxQ?!M%!Y1{5_6#u z&ikpdCs8Zj0BcEd#Y>XC2bM>k6p}CGPZILkVFgJ#4@UnvS*~{J7Y2A)qC&I>Brzs` zr*`4{tr!m6EUMGFu43vt#VL^w-Ug2|0KkTcNb7m0qx-wI>Z`W2<~M8{c*QVgjRCC z{3Moa{$S;TdyV{YVtvp)4hkO@+ZBKk==CBxu|{7GA~v}NkcB5-_%p#n>6O(@O=CD` 
z$Sf|)2NY&NREvkmdWXOB8fl&DwL1rZzXmjjM@#@~!XytrctBQfHL=r0@t`J}XaHkx zN{H$9uy|K##@3tN%(%2ZPj{gS&IuAP^NV}t!9}oUr`rCDrijBrDdC>rhDYvs9IC?o z80Z7Pxf2|pHU@P>=k_f%)WCOpWT$JcUr(?4fc)rQOR+&#EBMK9Xam9r5%}dJO9{^G z|7FJ|?C(be)7+Y1iJ?1z;c_`4&y6hw%*CL{d(!rZ#MN*m&ri~wfa?XLf+^sz_8q+Z zgbHvtogx(cu`E13vNHTBQ^}uxL&f0Z_s&{4gF^rfNA`FR38K4s?{RS&rP#ztW8yKG zq5Th{@u3{`rG&;6c{Yr|gVCcZKR1|hrR7?k6Qt|Sz=ouVtS(qt2*sK+5bWU9#cg3xH1uo48SvSyvt*ODWRcPXQ z|1?wJLxsfvopu)vo#Q2BEG{>Z#E4(B2*6es1$mSagB_&(bF}oDB~v38`W*OhEIwZ= z@Fd1EJ$$hkX>>hZnov*9v+t7W@bYxrG+sIKW(6uY1Qy>*p zj2?98Z}}9AvoH#U`Ym*7IdRoZLmUHO9R9FgK>AQN!(GgUm(|#We5tD4lug2=s{kFb zl*D2PnDbz9@7NyQ;~?aI4|nbfaKm10_y1rxm62vTJA!VJAxohO;(`Bi0717B3XP#~>PRfANGj)Nwwh8POf+j$EK1v!Kt0WR%xb&xpwmH14z;hT4q5RMOVShU5!6gR zlX_649QF4~E^Vu8Pd=ZCZ64&Stm{-Tg3l+3;QYRmv|bb7PJ@X2M~(_d(Jq2fu&9b@ zx`C6r$M@#~IMR5OA6zBZ(HLM9JxaRNqb7Kbi^r)n53^IAi{Ghq<63Ff$E`~&POo4) znOjS;{=dj?`yvc**>KTI5ZA{Xc>L4$&!>Xb7mf|~DNZq+J9`RZ6}5ln0GF5ye6ihJ z3|KoB?FY(0_2NBZd5Q$HI0)pAtQ|{&OF{3T^fislRca|f-A-|}Y7{p|9Se=jnL*uX zgq)RYF&4>9^UYSJpD`5xeNr1#LwT1QcWo|G7oZ|RLr3La+L*t=E3cWaH7wYK=wfTg zNT{Ql9HSh$ZY+b7#ODgdAJqXzB1XHjF|Wg36zoh9Q*cLB0{(@r#@uFFHOBM|15m1o zwD9MUn9up|FE|5#7x!p?AlVU>P_McLY3nTk(#%tvwxTOY`=pl8S- zC)dBf9LV6`@_T%V=1>2cskpj^Ha?^?UfMCzoXk_SIWn$qC7=^Cvp6m16tt*XIdTJ_d%ANAOW4-tuHv`zuv( ze>U3ya!k$JiuHDzLPN|HpNr3D&?CDzfgQ0Tc!~m9SC(dgyC41cUI`jegh5cgt3v)x zY+(p<%yuJkJ&Tcj%KxRSPu4!lD4Ff6abjaMsdiZJeFNP~wF8}>t=nx4mcl(a^5;*L zK%pUQ@~@gtqcWhoGvf6>7awvnq33tf1HjfGr!C7oaR4b^^%%jP?+Xt`8Lg1!N87Yz z{{yaG1q#;1e};ZcE3^aeh+YJ9cNH}@ACx9SNqPkQ$`Y8QZ`X&II>yUGx3USQdFJ(( z^lnLJBPAG3<@XV>^6wD#^P%F66ecEfgU+X~;r}rwi;C`0(MCvzll3@evf2H}^oChezva$UF1FR z_Y;fxS4C=dQeLr56XFU2_kc9cGKS+^ijrIzD zY)_4!mN9b(yDH{MUN2b;Oj&%yX5XH_=CjbW$l-i(hu)E?=PP%4Mz~s_dMq`q+L)TV zWs~C8TJc%c!xFc;^RHm5D#2z0+0U$oHZ}L6k5IF{L6K6ow5#IAPO83d?h};kDj{Mm zFw_U2+XM6PN9yvr5Z5F8tTAS@f5*_0N1hleHVSXGQ#i>_I%|q+_l;(Jd`kV&`41~o z$Ch7aNxGA5^`4+7wpcjSx@RWbyC0Xbc|x3xa=2(b_H4<$kAJ#Nl}h33iUI{Kc;e34 z(DLVe0DASfXslQ$VCjNBU&!R1YTb93ryi$y7m^tBNV~}3>1pY5QjWth6~y3f4TwY& z%I3FQh=U+_K!t&K^GZE+SKr96AKfeb<-(5`#VLC|veo`BV_Z%|}xRY-!=D-?1t5<-^~Oc*SBz$`N1xkw%la z?+9mnb0@HsiO+MN`Bvi2h5kwn|CAm;_CKgw8rU785O?}*Y_3zn@k$fR3S%g1BWh!{ zcPi-^&rD*b9k;!SHpn)`HZ5z#4?+^`>3^8XY@BmOUI_3`L)#;s52|f-_&L4Nbmhkw zO$>kUffsBsS&*$|uj^5xHjYHIIf~RvLHPX>qBDgMT#PMvfK?J6o!sioW_~k_y=M2K z2*Cf>)!|?0tnrBNBazvYbq|)nvG9rq694Z6Z{=Sh1|!s@Za*+k>chUin`h3|mxIKT z{5|D$ndkNu5_2d@c47odxgq{II`9#jGvN8zG+WxBrc^WG5H4}L{6uzY$tj^8OO@BT zAx|y^@50_XIMTpR8%Bu3_?tXY48mnS;6e(xJ+~n3XF|`AA_33Z{Rf%^r7@+lR38lJ z^N4=4LNx$aJwugjHmb0@PW{U7fuC=?rn|gPM1KE-w+Lo)LRJaJz*`V5Tq@Q0*@1?mN z=|U^2?v3g~R@D#*QpE>M>#w{glNpYJJi{)mg^i$=kQ%*nPg5odV1ClinB5dS*a`)9 zSdm&xKLtAYUNlOwgMinHkYqKRjc4ixScXyd$Sq~)nV^iVyQp%}bFE}ttfGXyxet)K zYn51Qj9h`~vxSot1nmH9dA5m%_B$%_Y-4DM2V2N^MRCR-0|%dQ`sjANW*4~OvPpK? 
z3XHE8DXcQ{9v9_-kw?>3N2hao3j3t3vtaR6K&3;9lPAPcuB%nto#+%$P`7_k{(~;{ zqb_nc%sx;w&0Yc@B+z!@&fiX2!TI{(5=Wg5Kp{h)I8g?)2@2n;y=0Ra9wE>FmAE5M zU2UH_`P}niPjs8OihW-P+^)29kyh2Li2~os;e(67`BJN;FTI*liEU6m`f`a14vBD~!jtT{v`85f`q3q-_yD*irGOihE@XB|L-ME8ylt_)Cu;6it zc$;QnfW$e*FNsV0DkwfKZ-wpV$JXuesu_;XsY6KI9@gJ1KY8OC$9xhrT zq-o9s;y&IK`iQ5r=X(OKjGLVShb}A#F|XLSbUQf4G7^WU?!7(+v5{<;vQ!>sky9ET zq8CNaOkPDsQtAwaBgq&{b+vB55biZj`oZ$b;)o=j57`ov+MNCx4# zoP|R)G_Ix@_3tN+F@?9T#kF?Uz;i@u5cWo(S`lm;D3;CqdjzjU*k%L}XSM%Uj4Jy?+iIl%bj<&@7Zw(LIMP139 zrO!4TPk`$QR2`_CC)oUQ5k%7$;9>nbr-eA*2>~t+h=bl0zejA8!hCe=AklQnIC;VP zSd_P>`XB&p+wx($c1QFKaMNQAlL;06_ow0)ky!T-AyzU}TuXwkOy>#H7u9QcZX>smDSgaB!dmjq1aer#E6FOo=# z3eDf)yxUehB(I~OXb23ryi1U?sO~x7U?H^-Uh}snCnsZ^-Ay4ildn91vtqKbzI>Px z_Cb;=HoXbv{n{*Ghy?%!|z~o)5<#k?ERuM;D6@-dw!PlN>JM9kv<~bS0gG~Mt7Bm7c8CRF(WC;>3 z=PIrvjJIsGu>?>&$hoS8;0>;IcAfmFw>s%+c^Jhe5mQc9Wi(JpNHU&{cRJah1E2v5 z;`ur;=*~D|vN)H_g3)@EJ8_B-(|#>DkfHR${%yJlv>QefQOaAhCdBK|sb;pyvDo6^ zWD*;jYu76)gZ~EhENj)3{_FA43izgDl4iT?F@S$ZUp%Qgtqy0gM;C1RXB2G^QXk*#1$j#h#&CDmNXT8>WmJy~*{3AsMAoojBj43s%4zlwd9o z?m&<0Xp#^YbibYRf*BWZyOz-Hq%CmU*U5jK+DmHchND>hC0QUN-JCIW<=+vJGUfu-QSj8w01Wm;0w^YR^ zZy36+&C7OoetjexyTuI;yp<-i?TMr8zl4X4AV96;Z-7{KYsKM`xc@btM{%&?^Tm6= zz|CxHbLxiL;`cHSA3EJx-d3CAxojv2uNg@TW9t#WD-~EQ4KGm7C=e0h;Gz6EwHzQK z(S{8~qUnqqv02SYr4<*0N+9PAmO6%|hXygnq>VB1>Y9rBshwFVxfO5zLJ&dZ8URoK z-fhleI>{$ng0cb(!wnUGwY8QBY(kp6`8_3bn7Q}wfhlUgK1pEKAR-wR5u%yfl_U2%9HllWS|Gxs z=o~V*fB?c_81!4JK9E7~EzY8jo13#Hc>Q~;95g%N>-|4RT(t{{xcBAZlj)c3(eE#C z3*POJf@E}*c{Wf|_&G^G25V=G#gr{GOspVh{uQ5S?6HhZ<%nL0!wK`^7O z-ZQPK>@P`1miqUL`(qDVJ%yc%{>~^X=(|0Pw~1NT)s}-20GxtI9o0|)g3S<4N{fyB z%4|gty)!%lhadD*YvJI{SplpYi?qymXdzzP_o8N1O3~DROtsMBBRrxprUp*E03BqO zh{Vn#0TLhC2ePKg+>R-kAXWX?NcLmC*TyepmOwIv=f^dCo=uC}uZKlcW|jpLkBzev zk=wjD?Sr_C0(QnNx^`Gg`m76-n#bTk8xDh9Zx&6xJg61lfAP$P%LpqzgfFYdi}J## z9Sgl@fGgsTv!yM5gdF}14a8}}K$&6gGT1cyrRAKNAUD4D@StI<^oYXj}$AYO)%^s?kaDy1k{ULhD%a`nKQ2uc=Z}>Sj z7!_Ri+ARrfJ-BoIZAvjP)FaEe%~O$B6&`z%xVg+Z*63zX4%==oGG2TP67mPciX+sn z5;3#>EJhlgME{g^8La=(3B%CmY5H%OndIymuF3NtWqxx1Wv3=EJ(Rzt*f>Z%)svJl zbv-3D_WT#2bEs3Myu)zu+jj>ceDYWPegzTeG%G-gtwA>?$CBsMFZV;HQ9lCHlH)95 zwzr*+6yi|UVbHtBkhW!o&x-$u&{JO{BfF;a9LmjS)KM=zI()B!C(xUiyu&h=C2t`2 z-`<^-FMa^oXn`v-N)=-)udK3NuTh0!8BBG&Ru`VV+i<2#Ve<+O*msTIQsqe6RCV_zstO!zVrk61u-P6li9O9vfZX333p& z<2~-%D7{f*m$1y8B>K^EKzYmx*u6E<3PY+BAyny>CM6XE4Cp zkpVtl8}OTxi7|@$3Wa|^xT*;lXXrX&{3rrg&wB1w(7jcnzW0PuFyrx&E8DmwG5Xeg zVR^p|pKuG>2-~Z_ZZyO3mIqhXPYp>wsXFX)vVTv2gKME3$A1YDC8)pR|1dpYV8`UH~e!G=9@S| zo8(cDxwWwzecaj<=)X3gzSsv=!P{Y2GKeRs6kW&1dvR4g7T5F7Qvk*ERph_04&BC! 
z>9@PcvgmSnZ~rn7%u~I62O#@cRUb7GdJp;|QO1tvsO5Pu%=HCb%{zrugK9C+JM0E^ zxoSQ|Q2=wc5SXa>?__RUv!dDe*^t#uOj@Wqho}*q-;*0CvIrQx(H?$&oE=di_SVl_ zm*O5r{<1R59m+aM08YNya_K~4@hx0AWU_DK=X2ATd^6C%oNsZ?{~re4pbVxD1L&*p zqDzr)zYwQpwP8((u|O3ED80)2jaMDy@C>HM4d~y&g+Xt(90On00gk;QgW4!L8e@A3 z3HT*{jF5=*3V`$2A>oQe2>FmLqRi{3DB$z#>O{^O(co-vC>>yAu7mx()s1qK7A(-4 zfkY|1;(&7D0#>#wCxrq289DgBQWD?sb@vKmBm`Ac=+j_%;4lBl)I;vi`wkPJ)#zI* zU7EnRr?9IS@;?2(OyP5K_>AVFIcIT)Rb3JxUpgifm=vD!dPFSPGZu$=N_j=9I$M^n z7SIpIaHv!C(J%{UZ6g+HX3q+HvVSy@_a~ikcoT_*HRJR0&I*;(95tE`|BezYn2@dybP*I8eVXl$$CfliN_fhT@Rs5{s{(ITyW1{c#_%(iUsh?XD=eJ`kV zDHu%XNk`wvrW+hbakP`LPJf!@3YYJO0c_wr!xG2Jg-GRNb~oF340qL$ZzA_=nRwls zBGck=dz_xi=G$XkRgTjrSK55Lpl>rlu$2tk__J9j*<9B{YF_i4&SDB`(9VyMSvBOIEZBjAt4YZPPoW^Ul zda41g+7w3X+B?Cb`hgc!P8uu;$8Pm@&t7tyIMdtTLL#-LRt|q*&hqHHmZDOLkdEd( zuC;b4w5%A6we+800Wkt#_~h>-Mo(#Q`*g`Zp}?8uAOpLMn<|cYMnD-RuwhLYV@U(V zA_fu>Qrvx)O`m19s~5CL`U9`lyaE=bN;wrBuv9`pb=CAW<4aR6;SMlu7q=N~?xwiC z+AW*n5l&3EI-2!G&Z*L4TNp4l(Ob~&?Du5)RZ3ZR)7pW|U$2a7s?4#v@`&2v;=J}7 zi@?~zg3WwZHU&n?dhTIFw%dTJ;oE|i+JMlk1HD9OlrI%5R5m`;$I9+;OYj(44+aS3gkkpAQmuwHf6Ri*JTNTSG81@P8Jwrw6LKD< zufL{fS7;Aa zsU@%UM672_+E&`Ll`|qm(#ZghH%7iE6y~CE83l zS19tu>RJ=vFokr%_?QFRPBUl;+-e9@Z-_jhAlV&2PHuEY+-Bp*4NZ7*u$e_An7zk8 z9)A0@Sp+QReWmn{b;M{MfIZ?i(7&=OL^^^&?$)rcI@V@R-}|mH8_x4SAUH?o4=cK- z7+O-G;w`e`@p|KFfum_Rr~0rKkq5dLDhWz495adai3zU}Ejjw$xeUc=DsSgrb2wBA z>b_y=d_E-4*D3c{chK3Ju$#;Em>$$%th_FGxc2ISo&z3H)Qctv7WvPaXBw(Gp$9^x z5(MhF8KN>>bG!fOz1k^za zaQvEJe>@e1#l4o?NI%mjW%9E@ArWNu$3X=!6aiT*2*|^!!msGO&!SBQ+km_YkwD(h zi37AwEbT?trz!-j(kBcHWktigWwPRkxh9WaIrsCIU>ElJ4P!;GVbYu@3EkF-3yigA z^SYn=#fQ;wvf!NWMjV3Xnmcyutmpo)y@{!gjt#ylZU9oC3MdY5CDuqMDO5sPP&iWb z?$u=Wj!j)FCrJpCV&kp6M!2^jfhcHlJ*5kJGEE^wwjU?M-q3VKao3R!!bE3fzYsuq zGDIP8JMcHn@3xvj{zI!vmLYIN^JmFK$RH7ebYhIDL*$bHXSfSjsJAvfA=96+#B`08 zsGQ$r$rLYMK)lQ#aePI58Q*i+M+qgkTSovGNirWTV_Ufsz=e8p*>DAo(B!7%d5?yO zE{HrfC3x^4uY|*HsB>)t_XV^s2!L%SthN9|zd;{-0(@Z5Cm9GJ+vG z8)F2F<%R;Hn*==uttvr-CT5$^%Xyyp-suSo9Qv64A_$QcaB(JW5Feofi8El<3 z)=xt>CjJs!lM2nN=JjDI5~t{|b~s|Tn9Kc;DvSfmm63s813fIqJu063or=Jydw6K^ zMEGg?bG9(yF=Y5PvlQkpGr0&VO8K~`+rd|c%wLY-m7l4MO+mjE*x{eHhoIyBe-XHu zF}9z%47R?Bq*m_B7ERfD^t4Os>szdWa`VTTL#P#U*u}?*-Vh^RIjQ~E#-CjL5WqYW znrYAZTj_~FU+Wx_Z&?cB*aO8rK{_WL%$EUS8>S{_CvY8;Q7}vf-uH0aJ_@UfGCYl1 z^(^zBJ}rbghInR^KNRGTi;QEc`@MOK==A1?T0)0*ePA=4IHPYK&G289Cn~lpp7ofC z0sU(QlT`30bpdMDybA^HBBBSBS+N-%ZIG@&LXrV!2l(+wzNGEJgLiC5_t8LBhD!q> z>JV6&K`CBNDvZ!Qa+=pQ;NF5+AAw0~Ma7DF2P;5<9(y+i0{}<6{HqeiGCItErR_fc zOEnB~Duo*vvNo(CcXK=(e7sNnEdg8lM{fuCK;(MO{ua9QKz;-Ypsq7FK|QzPXcNH? 
z{R5o}M=El@F7Qk+O1peHX8$n9PJWhWm9^(D|4YyD@Xmd!zKJ+g*9E&UTMN=rE$0VSe z4pl@Mj&pR{9&Q3Jk_Sy~Tz6zKO#1fP>PJh zA15h`sdzDkxA$d~F9hzX-25@&ii1PYcC`b5e5cM_3Tq&NR^uM8m=#}HH>vt}MdT2l z?;ffIBV=cuVTAI$mhTupcMwppx;$LXYV~SSbif7GQ$?rjdTMNOKH((BBPn;|;P&wn z^TGzVfcHi7-qSPjGMbF_n7N;rbKKHHaN}wZP2s0_%Cz(4Pc>(%uaz0(?`KlU8>@v7 z=@oR(CIdwoY%#?By=A)FeBysjbG>)7zbDQ*!Vc3==|%WMr=MF znX_iOO%wZI@(NTv4#{PEBE?G!JSCR3<-ekgvJ-<@>In9JmR<_O;zeVHL^k5O5y zf;MNr2+SP1L2mhw+nCzXt{$4sb;~lc_+yo~hdY#gw> z;inNhw~(Kq#HuO(ze>??dRLmIY)~UaG*8t&XW~}8;6-x1l+jlolE8tiojCP)2Rz}C zA^v0}@gpWqtlRtB_ZcJIQYY1vQzbc4d722`OK2&zfOZ{X;21g?p0hhaAsg7S+JE?E z;xhUUqwX1$uTjHjy~b}E7MzEt4>}D*@F?z4S}Sa(E#ppJwx~n%M&k|Wt^=B6!fPV0 zrc0jQz`xV2$-V`&cEAbiLRF+wPU+XnlLgT?s zDY!V@Ay=K3@su9%bh-Yg+|89SQUbJ+q`rWn< zj}n2ul21+L2aG%W`K3xe&asatK~#sDaBf@>RL_4-HH%H1tLM-(lhwUp=V5<1zynC^ zih{45BnH_%977ymDHAz{{ZtU}{uN7|aty@Mpa&7;OA3o6(2Il<2GZp#>=7 zW$DCI9n%0+*?0>_&yzo`E!h%f8YJ0!)DUYEw2&KMwNO6?xff^C3Q(9cpl1}>AK>?y zUajhuWMobAwP7(B@QK;K0#~&j+KgNDu|9R?2j-vEa7^qUV-Y;)Sc5gS*Rv3tn1w-_ zzPu+-?ta{;8HpkHToMqZYiKzEdik(|F$oVwflBzXSv%$@0dS9H;r8N6C}uwI%E zcBXU|aBXvfdn@;8pwON-6sKo(v%9cyw2v{A<&d4%>BYgU338kJ>jqL#fJjDl@!I`O zFOWGRK#f+$N&cz@J!!TTiL~eVHRC*r$c%80hN=N}(ge9`*|MXS_8LQT(VTk0&tP7G z<%sVGg6#=-nE$tR@`L@0(PYnJ|GcAY)DwG7`VqN(p%Yv~l?w<_FZaU{Gb3>*iG%oO`S5XA!M7OH&9|F)Y$pGI7CLki(bZjK1NCFu^>NxD9N{*_(OKJkZzV z-#CDrP^##6k^@6xIaz2>L1WnmNnP$Cq7F5;qfxfc>9gX$AO>q&xKYCQrwIg9q0Vx6 zyBD%0>OgV^eeM4Uj;5cFN{x!jdx#gCYK03^W9&v1pzm(qH{dbIrhBky5Fi{$!(1a< zU$4?%Bub|$HZ5Qsw7YMYdHp&c#nVa6g=xz0z?~cT?Y$04VG|_v8R){l1TJYAm8>>x z2XdK=b)*gLpVs2ys zNLF|6c!;o4tM^N}&`rMpVZmwtgc9dnwu^*aM=F-yWYB0FgAILjjq)Q-D5O>?ajFN7>MGY4 z*ANb}u<_!|P~T`o-1(~u4`gUiBcHB_R(TdCfab|_bWHAKZ^C62!&BOLdwphIAF?iY zMHvy_3Bc(#r1%ze?h$W zG$ZiUSJ{}5Pa`xE06F%bE`wog2-*9n5oIx7{k_AG;o)tePEwPXr>wUpfI8q?0=UbN z0d?v6TDi4h-|y;2*-he6lh zlvnHjIA4Um>BiP~e~WpvN{cP&Gc;9whmB2V<<)hYk#vL$d*Veg^!E!ZsP)Z|=KBLF zUbp(XQ4Vmp`-2iej0!wxw{vrL&U_ozE5A&G?E#F~hc*Z5_cChyUGF%k^!k!EgcJ z5EJnXV14_~H4i@%PVn2Fti!hwO4A5g)^G+p;uTaO`RlL%*Hs+H_J3uUCHtOJd1%r3 zwZ;iw77jG}vhUdkH^_@zGwQ9+oHR*o_lm^c)WwJu$`E*(!&`xopYGb?k_h=l;qf{* zV4`4vC$Tzc??GDtP%>^bhf_;B))eU6NcHYWh42e0`x+v=77T}h^qR9N!OdKc+y7Wg zvw`Pau~~Da;{|R{z>oHI$Y_1|qKgvtO=h12eQ3*O0ewj}z7y%;HC|FEJRZ?61@Tx< zq|24mOQ%H4^FEh9ifmoJHNyesRciUnUG1Ly-Je?~0pVxYE+m*T#eGFwGG0f&MBmy}!$V4l4JM4pB3i6YGO_Nk3!CB* z{XNHn6l%zSr@IlJRA0W5>I9t1v_C4k^yrCHMV>Sdgr3_BfjOs?qkcpZ3fqSVG)!*< z>ckEw_8tzXw6KE4;GCapl%$E0MgpQonbH&j50XJ;ViIdyvH~`sv|U7)fs{BuQ20^^ zCUEHfUkKUgl$8~$ZYUF~2dn%z07L9BG&1~{Z-8uGM#(0WzfTsv6hDL=1D6r%gpHT3 zD!EW~LXHt%&+f0c82e0x|9Z;M3RcA^bkV8GsGwVz6#%k=z;FepgtSRfO4?Qz9xt4; z)U|uuC_kiUW=e93J3}B{O%0Ih`KQicH_BZ898f+YGnCeD>Yf3zf~(y+c{|DYztA{5 zwKWjlIJ1Jp0nB`GL<<~uE3&gZTnz~GcTHhZ5<@pH+IKuuiiSab|9=C1Dny(hzS`Bo zj^?L0i6v-*n7oKB9S^|T-b87qL8#3H5z1(t=fuK$_cH=NkW)Of$be!R8v9(2fSx-@ zjgu{Q(#*omim|o3QV+hsHU6MHqJ(Oeg3$px z;H=U|g`ruQI_W4!!S)vY*M($(GO~a|;$EN~KXj=CJcN@&J<(W+?C*co(FOl~8qrKJ zQW?Vo>Iy*ylAq0UAS&6c;dT!6`${U!y=}wK{bJwA#0`6WD*#~p2h;v{wG2*<(wnOZ zl@9qhpKTBxadAt~)ZLad31;>+(ip&>F^N1s3pr+7RQqJKq>lCaqVL}GfCx1rB%cbbVT5W>bo7b0yLXbC z3PK}auLyujGPQ7FdxcNUDKT}p zPTO~u+t19tBO`G5r9VZ=fTAEM`6^uDc5|sZEty-^i$LmoPD)2UF9$4$6JkSCdxOT? 
zT#u1zFRH`hvjnXjCX!u7KYI9Qz9sfh6Cvp3czkqk$4QNR`F_qz5ni(6uZ1 z)H(K|VSY+6QEZq+v-)xM;?)}}@Tj_x(L$jshFd8QcS5Flz$e`|H47^BSn7p9|F2cd zCd0@meHV3%jNRA|IITnJR}`+rDZ%tI;smY%=x`4|5aW@r>i`hB&E!N}XF0SzvEm-$ z*B_B&Pn<0=1pomTt!n1tQ%kBIYoN7I>^JQ3bWd9+)NVUr(iv_+HV6L62KiT>T#l8% zIl}H6Hx2@G8>TFR4Lrq1(XNR$U-98#S}-YVjW4Z8Xd|btiZEsh+*=g!L9PQn#)nw_ zG~-cFAVbw6<1+E2cJQI4z7-X9Xu zSW@doLNSf__}4q?g#J7{wGa-wH-}Xu7-GIV@&@2&1~LH+E@!r9dN>^LkP5OYGS#(z zmZ%@^tZV8Gt9;|2?lL2Of9+XZ^A})0^&Rgd{0nid)Y2Vpm7hhCQiOJ1j2I6|@`YE{ zPV~V~U1cD-E{rG6s4ZA;%CT{wwWF3)eY_F`p;^U>Sz~A2aE;7Iyd}KXp|7u za{*=SQ5oHh442t}BgnnJ!}`G{jA{0nvb-GAW4*2HnT8N_efOmHeF2(+Gy7E$ALy4I z9MgOocllu82GWD!dH*O(T}X1c&o3=!2;kcQwkmRW_L$0uQBaV9_fHjAcNi+zINAvRUTM8bom%4P zXAcf1l<5wl$%YbDoA&qVS(&+cEjm_zUd~(q5R}ctDF&uQ87D6`N`&FerBn3tcfFZNlS4KKVfiB?37G_@{KkoE@<_YfZtntAbj5$ z!g7}6NA!C+>4A}x?gAm$0k{-*{QD|K*buTt9WI&9fFmYJsG_5JTZv!F;|K%%7DR4r z?&)9z!=z|7xqvdf7!BE;$&hDzw4VgRiRd>DHo{`Fm;Cg~fzqMBUVxF85Y%_RX%yES zEu8BMaHywp8i;YZ(mN>AtpsvZzL5doGfV~!F<5NSK_SRzF|#B1Eo*6m{Qv*}0Rh+|Z-hEA6<;xOeOC24W7(F{ z-UJLM@vKSuaZT@txY6JOvM21M%u@!%|)&E1OBRRc9-(9RaVzl2)bI=TL}KXKt^ z^^V6ORKNfea?nyxgLEJ<2d*BwyBf#-|FHg?Yjr;55i zondAZ@`70NZ#GcoW?2v4c>|{53a~7+#;Od=*eL8ZIiXKaXW9?+a}9jgkwFfQP;IXS ze!+fPPM~11ApRAG<%oR0g*qHwe%z~yM%4mQP zL-E%3#CPhvrg=B<{Fb)kP=6ZpXEhUVRG@@Z$`>XJ#rpjry6Koq?DO_3@KXCftLVvB zcT&^K5j!l>9c;5ii??wuaeJ!12Q~uGrclF%ElJIRPuezsetv=zCapTuh@>H8gw8cA6Rx%YS5B$!9JUZ_Yhv60yUBFUmiqyb5?e$PCb?%PhRuHW-2Tw7 z=NX&3n!?%O)cc-9_6d>`q)abVAtR7T%{n*4$o={|!I289fUL0U$lYOTRqQkqz0p74 zt5@;-!B2y;vwaZ6f}#4>r~hDTdR$bDaYM?Vqwi>!eQoSr*HVx`V9&3)G$ zo^S3B|1oyO?|=s%2t(dIETw|B1_|@o08XcE-EO+J@SB?_vd8Y!i&|c;R5@t(hQp{m}~McF)CIoa9_8{`?+YWMkXC zPJXl+rj@Oz%Q?9GqFlB*gbke>F6~VF}2^3u4hXB6%#>E*@hy znE_2bnlV3avzimUjfXZui#*I$Mjo-iZHgLhT(Phd1r=X&bj9fVR`vrSnAFHq$XQaAe zzISEyeAOn-j|!tAKOM%u=0Yj(HZe;ZM?b&i4-3JW9=EpYoU7o3}A z2)74C8=-W2{u$$Mu|(DEyV5+n+40?vUImybZKKZ<%MWp!T`o*pgvQC^gR2b*jwH&z zTWHti&oU`m0POl~gxo3$YD1%**4ovWaI=C%9lwQvl4W3Y7DAyShSbvha38d2%D zC~3KFjpEDiciC(}?j)tDTO9~K-75>Unjgk{{0ucwJ~2)IEsQy6ae6zLwGkrHlupYM z<=0{B9m))1YPNTq{<;zk-51&y)oV%d@E_FXcg_ET|z>e&&OmoVC%ldO= zdht3&bnRZ4NK0p4f@ku%H`)vT+g{2z-iwX4X>B|Qwe2%>hKphmv6U7s^;f7Pt;kws z?pwI%HyF>nSxCcl&Zc^|!aUTFZo>1zDwxFLHy+9rBe!`Ax)1c<^znlj+1m+T+6%xA zONpCz2wSeAM#DN%^gnBPpe*OufB1&L{q`uH81t&+{xLH5z?yoXr`c-XhOoPMp^X%y zAS_{7XP&HaXg8GU7x_$#UAkUJjjx_Y8rJQDcr)9k1b=lf6b3>@&ro@zp`R>1A~VB^ zA4oaH4f~3Hr0JGBdXwU)D`1@|WS4{JiDzj%eKli4CP6qT@izuL56tSPhlrAwF`Y}S zo}srCXkFw&#z_eLHu`Ov6^ycx~ISfhyhM- zdQ^oB^hn4dnees#&?JsI&Yf>jzHh`WEcL0nor8fOKi|p1v^`g@aBBym9P-P9Cy_>R z?qEls8=Ql)lMt4c;}N*g0TdcBMm&9@<9uZOPuw4A-b5xik2!h0O=tXO7>auMDE5Jf z49Zx`C2*V|g?COe#rPZBWqTa}@OM2}FYK`Oe_3{#+~9MMhnPE`fdPhix}rlyQv@5M|%6ec%G6 z)M%3-pg2hzqPgYS>9_yIraVF}AE%ybuC(d3$)^AJ7o!Xx*4HUME3OCcZx~-8y9i2lOcST3?euQ96FxEe>JN*xhfMqU)ZJJHf#;&TGHL z969}G;E4-I8Taw_YCN;p>WfvhNj}O&cwb(~)7}0{?sb0EQVpMHZ+B|ymlA^yM#h)l09`u#mczHmFjbX*xckG|tS{jCW(@@%Ou#}Uh z%OT=-6^m4>8!i$em@yF_%+oQ*n`$m7hO!#oj;d8SYL^I+Z_^uOb6x{}?~o1iY$?v2 z>)ql|BEAX$000024<5qc*ji4pdKiEJ0000000mY5&YQvhFeLPK*mnBa1&tC4vbL~l zrM0C;@SZL|5`IIhB&wFhb1X`1GmU^^m`=3|#1%*ieWNd}CtVPUky1RL{s;}4X%CM9 zX+SsTj;~{wfV09mw`<&k)!rXSzU*FD0nV`*WKo%m(P)g@%ufP|+I6~Qgqbj=dWf>K zXLUPyWBna%!`QEMs9d};*D|aRd35O~mM{POsI4C1Jyqzf248Sw+H-cd#sc>BkRjKE z+;YN*fbHQ=5v7m_;cz6@%Uu29$2`Koi!T66?6hn?T0%1!shRgCZ<8Ig3~rI*4}JeQ z#KW+CAD0W7XbCth#sZ5(rXp>k6QT2Cvm(NFcY)yB?M6Ono~UFUQbkjU8yZdrX8cIk zoP_ub;<`0u(xP2&Sbcc*$9Sipcn;FeX`RIap0r<545X?Vyu zI|9u3NJ=080000A=-vrvvS}THxAk1}q^d>q?cRYWvy8jR*N^Ms&1aIM7R=C3M9Xi+ zgSJM&)A?cROzt0OZoU>@yqecaY+-AzHnw{6SWJPf{b2(0000000000004v)a>)Py literal 0 HcmV?d00001 diff --git 
a/mulicache-readme-cn.md b/mulicache-readme-cn.md
new file mode 100644
index 0000000..9d21d83
--- /dev/null
+++ b/mulicache-readme-cn.md
@@ -0,0 +1,127 @@
+# MultiModal Cache
+
+To meet the performance requirements of multimodal workloads, we developed the MultiModal Cache system on top of LLModel Cache. MultiModal Cache enhances the capabilities of ModelCache, optimizes its architecture, and adapts to a wider range of application scenarios.
+
+- [MultiModal Cache](#multimodal-cache)
+  - [News](#news)
+  - [Features](#features)
+  - [Performance](#performance)
+  - [Effect Evaluation](#effect-evaluation)
+  - [Contributing](#contributing)
+
+## News
+
+- [2024.12.12] The MultiModal Cache system is officially released.
+
+## Features
+
+| Scenario | Data type | Image format | Data isolation |
+|------|----------|----------|----------|
+| Text dialogue | Text | N/A | Supported |
+| Image-text understanding | Text + image | image_url/image_base64 | Supported |
+
+- **Compatibility**: supports three data formats (text, image links via image_url, and Base64-encoded images) as well as combinations of them.
+- **Data isolation**: supports data isolation across models, allowing different models to run independently within the same system.
+- **Modality isolation**: supports isolated handling of different modalities (such as text and images) under the same model.
+
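The patch itself does not spell out the request schema of the MultiModal Cache service, so the sketch below is only an illustration of how the three data formats and the model-scoped isolation described above might be combined in a single query payload. The field names (`type`, `scope`, `query`) and their layout are assumptions for illustration, not the project's documented API.

```python
# Minimal sketch of a multimodal query payload; field names are assumed, not documented.
import base64
import json


def build_query(model: str, text: str, image_url: str = None, image_path: str = None) -> dict:
    """Combine text with an image_url and/or an inline image_base64 under one model scope."""
    content = {"text": text}
    if image_url:
        content["image_url"] = image_url                      # remote image reference
    if image_path:
        with open(image_path, "rb") as f:                     # inline Base64-encoded image
            content["image_base64"] = base64.b64encode(f.read()).decode("utf-8")
    # "model" scopes the entry, mirroring the per-model data isolation described above.
    return {"type": "query", "scope": {"model": model}, "query": content}


if __name__ == "__main__":
    payload = build_query("multimodal_demo", "What is shown in this picture?",
                          image_url="https://example.com/cat.png")
    print(json.dumps(payload, ensure_ascii=False, indent=2))
```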
+## Performance
+
+We carried out a comprehensive performance evaluation of MultiModal Cache in a production environment using enterprise-grade databases. The detailed figures are as follows:
+
+| Request type | Cache hit | Total latency range | Component | Component latency |
+|------|------|------|------|------|
+| Text | Hit | 420ms-520ms | Multi-Encoder (Text) | ~300ms |
+| Text | Hit | 420ms-520ms | Vector store retrieval | 40-50ms |
+| Text | Hit | 420ms-520ms | Relational store retrieval | 60-70ms |
+| Text | Not Hit | 300ms+N(s) | Multi-Encoder (Text) | ~300ms |
+| Text | Not Hit | 300ms+N(s) | Vector store retrieval | 40-50ms |
+| Text | Not Hit | 300ms+N(s) | LLM call | N (s) |
+| IMG_TEXT | Hit | 600ms-800ms | Multi-Encoder (image+text) | ~600ms |
+| IMG_TEXT | Hit | 600ms-800ms | Vector store retrieval | 40-50ms |
+| IMG_TEXT | Hit | 600ms-800ms | Relational store retrieval | 60-70ms |
+| IMG_TEXT | Not Hit | 600ms+N(s) | Multi-Encoder (image+text) | ~600ms |
+| IMG_TEXT | Not Hit | 600ms+N(s) | Vector store retrieval | 40-50ms |
+| IMG_TEXT | Not Hit | 600ms+N(s) | LLM call | N (s) |
+
+Based on the current evaluation results, there is still considerable room to optimize the embedding inference time.
+**Note**: using an embedded database may further improve performance.
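The latency bands above come from the authors' own evaluation. For a rough hit-versus-miss comparison against your own deployment, timing two identical requests from the client side is usually enough; in this sketch the service URL and payload shape are placeholders rather than values taken from the patch.

```python
# Rough client-side latency probe: send the same query twice and compare timings.
# URL and payload shape are placeholders; adapt them to your deployment.
import time

import requests

URL = "http://127.0.0.1:5000/multicache"
PAYLOAD = {"type": "query", "scope": {"model": "multimodal_demo"},
           "query": {"text": "What is shown in this picture?"}}


def timed_call() -> float:
    start = time.perf_counter()
    requests.post(URL, json=PAYLOAD, timeout=60).raise_for_status()
    return (time.perf_counter() - start) * 1000


first_ms = timed_call()   # likely a miss: encoder plus vector search (and an LLM call upstream)
second_ms = timed_call()  # identical query, expected to be answered from the cache
print(f"first call: {first_ms:.0f} ms, second call: {second_ms:.0f} ms")
```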
+
+## Effect Evaluation
+
+To fully assess the impact of the cache on the model service, we ran end-to-end performance tests comparing two service configurations, one with the cache and one without it. The automated tests used a dataset of 5,000 test cases.
+
+- Pre-release model service with cache: we observed its response time, expecting the cache to noticeably improve service performance and reduce latency.
+- Online model service without cache: we collected its original performance metrics and outputs, which serve as the comparison baseline.
+
+To make sure the data stays accurate and consistent after the cache is introduced, we compared the results returned by the two services and verified whether the cache mechanism changes the replies that end users receive.
+
+Compared with direct model calls, the call latency of the Cache Service shows a stable distribution and does not grow with the model's parameter size. Traditionally, model-call latency rises as model parameters scale up, because larger models need more compute. By storing frequently accessed data and avoiding repeated computation, the cache service decouples latency from model complexity to a large extent.
+
+![cache-service-cost-time-distribution](docs/cache-service-cost-time-distribution.webp)
+
+We also compared the latency of cache hits with the latency of actual model calls. The experimental data shows that, after integrating the Cache Service with the llama7B model, cache hits bring a performance improvement of more than 40%. As the models keep iterating and improving, we expect this gap to widen further.
+
+![time-cost-comparison](docs/time-cost-comparison.webp)
+
+## Contributing
+
+MultiModal Cache is an open-source project with plenty of potential, and we welcome contributions of all kinds:
+
+- Submit issues and suggestions
+- Contribute code
+- Improve the documentation and examples
+
+Whether you are an experienced developer or a newcomer, your participation will make this project better while giving back to the open-source community.

From 8753cb3ebbd73b92b6d4a421e4d26752fb23a1fe Mon Sep 17 00:00:00 2001
From: powerli2002 <845160705@qq.com>
Date: Sun, 22 Dec 2024 10:00:28 +0800
Subject: [PATCH 80/98] update requirements.txt

---
 requirements.txt | 1 +
 1 file changed, 1 insertion(+)

diff --git a/requirements.txt b/requirements.txt
index 63899e9..531fd82 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -14,5 +14,6 @@ redis==5.0.1
 modelscope==1.14.0
 fastapi==0.115.5
 uvicorn==0.32.0
+chromadb==0.5.23
 elasticsearch==7.10.0
 snowflake-id==1.0.2
\ No newline at end of file

From e035f8236b15081dbb0cc34d6e027152a1a72a7c Mon Sep 17 00:00:00 2001
From: cjn395973 <1046783533@qq.com>
Date: Sun, 22 Dec 2024 12:29:24 +0800
Subject: [PATCH 81/98] support startup with docker-compose

---
 .gitignore                          |  5 ++-
 Dockerfile                          |  8 ++++
 data/milvus/embedEtcd.yaml          |  5 +++
 data/milvus/user.yaml               |  1 +
 data/mysql/init/init.sql            | 32 +++++++++++++
 data/mysql/my.conf                  |  9 ++++
 docker-compose.yaml                 | 70 +++++++++++++++++++++++++++++
 modelcache/config/milvus_config.ini |  4 +-
 modelcache/config/mysql_config.ini  | 10 ++---
 requirements.txt                    |  2 +-
 10 files changed, 137 insertions(+), 9 deletions(-)
 create mode 100644 Dockerfile
 create mode 100644 data/milvus/embedEtcd.yaml
 create mode 100644 data/milvus/user.yaml
 create mode 100644 data/mysql/init/init.sql
 create mode 100644 data/mysql/my.conf
 create mode 100644 docker-compose.yaml

diff --git a/.gitignore b/.gitignore
index cd9b201..5d4a0f7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -142,4 +142,7 @@ dmypy.json
 **/multicache_serving.py
 **/modelcache_serving.py
-**/model/
\ No newline at end of file
+**/model/
+
+/data/milvus/db
+/data/mysql/db
\ No newline at end of file
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..c8ed488
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,8 @@
+FROM python:3.9-slim-bookworm
+
+WORKDIR /home/user
+
+COPY ./requirements.txt /home/user/docker_requirements.txt
+
+RUN pip config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple
+RUN pip install -r /home/user/docker_requirements.txt --retries 5 --timeout 120
diff --git a/data/milvus/embedEtcd.yaml b/data/milvus/embedEtcd.yaml
new file mode 100644
index 0000000..32954fa
--- /dev/null
+++ b/data/milvus/embedEtcd.yaml
@@ -0,0 +1,5 @@
+listen-client-urls: http://0.0.0.0:2379
+advertise-client-urls: http://0.0.0.0:2379
+quota-backend-bytes: 4294967296
+auto-compaction-mode: revision
+auto-compaction-retention: '1000'
diff --git a/data/milvus/user.yaml b/data/milvus/user.yaml
new file mode 100644
index 0000000..3638d96
--- /dev/null
+++ b/data/milvus/user.yaml
@@ -0,0 +1 @@
+# Extra config to override default milvus.yaml
\ No newline at end of file
diff --git a/data/mysql/init/init.sql b/data/mysql/init/init.sql
new file mode 100644
index 0000000..8e34c0a
--- /dev/null
+++ b/data/mysql/init/init.sql
@@ -0,0 +1,32 @@
+CREATE DATABASE IF NOT EXISTS `modelcache`;
+
+USE `modelcache`;
+
+CREATE TABLE IF NOT EXISTS `modelcache_llm_answer` (
+    `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT comment '主键',
+    `gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP comment '创建时间',
+    `gmt_modified` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP comment '修改时间',
+    `question` text NOT NULL comment 'question',
+    `answer` text NOT NULL comment 'answer',
+    `answer_type` int(11) NOT NULL comment 'answer_type',
+    `hit_count` int(11) NOT NULL DEFAULT '0' comment 'hit_count',
+    `model` varchar(1000) NOT NULL comment 'model',
+    `embedding_data` blob NOT NULL comment 'embedding_data',
+    `is_deleted` tinyint(1) NOT NULL DEFAULT '0' COMMENT 'delete state(0 Not deleted,-1 deleted)',
+    PRIMARY KEY(`id`)
+) AUTO_INCREMENT = 1 DEFAULT CHARSET = utf8mb4 COMMENT = 'cache_codegpt_answer';
+
+CREATE TABLE IF NOT EXISTS `modelcache_query_log` (
+    `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT comment '主键',
+    `gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP comment '创建时间',
+    `gmt_modified` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP comment '修改时间',
+    `error_code` int(11) NOT NULL comment 'errorCode',
+    `error_desc` varchar(1000) NOT NULL comment 'errorDesc',
+    `cache_hit` varchar(100) NOT NULL comment 'cacheHit',
+    `delta_time` float NOT NULL comment 'delta_time',
+    `model` varchar(1000) NOT NULL comment 'model',
+    `query` text NOT NULL comment 'query',
+    `hit_query` text NOT NULL comment 'hitQuery',
+    `answer` text NOT NULL comment 'answer',
+    PRIMARY KEY(`id`)
+) AUTO_INCREMENT = 1 DEFAULT CHARSET = utf8mb4 COMMENT = 'modelcache_query_log';
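This init script is mounted into the MySQL container's `/docker-entrypoint-initdb.d` by the compose file later in this patch, so the schema is created automatically on first start. As a quick post-startup sanity check, something like the following sketch can confirm the two tables exist; the credentials and database name mirror the compose file, while connecting through 127.0.0.1 assumes you are on the host machine using the published port.

```python
# Sanity check for the schema created by data/mysql/init/init.sql.
# Credentials/database mirror docker-compose.yaml; use host "mysql" instead of
# 127.0.0.1 when running inside the compose network.
import pymysql  # PyMySQL is already pinned in requirements.txt

conn = pymysql.connect(host="127.0.0.1", port=3306,
                       user="modelcache", password="modelcache",
                       database="modelcache")
try:
    with conn.cursor() as cur:
        cur.execute("SHOW TABLES")
        tables = {row[0] for row in cur.fetchall()}
        assert {"modelcache_llm_answer", "modelcache_query_log"} <= tables
        cur.execute("SELECT COUNT(*) FROM modelcache_query_log")
        print("query log rows:", cur.fetchone()[0])
finally:
    conn.close()
```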
diff --git a/data/mysql/my.conf b/data/mysql/my.conf
new file mode 100644
index 0000000..8594fef
--- /dev/null
+++ b/data/mysql/my.conf
@@ -0,0 +1,9 @@
+[mysqld]
+character-set-server=utf8mb4
+collation-server=utf8mb4_unicode_ci
+
+[client]
+default-character-set=utf8mb4
+
+[mysql]
+default-character-set=utf8mb4
\ No newline at end of file
diff --git a/docker-compose.yaml b/docker-compose.yaml
new file mode 100644
index 0000000..e079df8
--- /dev/null
+++ b/docker-compose.yaml
@@ -0,0 +1,70 @@
+version: 'Beta'
+services:
+  mysql:
+    image: mysql:8.0.23
+    container_name: mysql
+    environment:
+      MYSQL_ROOT_PASSWORD: 'root'
+      MYSQL_DATABASE: 'modelcache'
+      MYSQL_USER: 'modelcache'
+      MYSQL_PASSWORD: 'modelcache'
+    ports:
+      - 3306:3306
+    volumes:
+      - ./data/mysql/db:/var/lib/mysql
+      - ./data/mysql/my.cnf:/etc/mysql/conf.d/my.cnf
+      - ./data/mysql/init:/docker-entrypoint-initdb.d
+    restart: on-failure
+    networks:
+      - modelcache
+
+  milvus:
+    image: milvusdb/milvus:v2.5.0-beta
+    container_name: milvus
+    security_opt:
+      - seccomp:unconfined
+    environment:
+      ETCD_USE_EMBED: true
+      ETCD_DATA_DIR: /var/lib/milvus/etcd
+      ETCD_CONFIG_PATH: /milvus/configs/embedEtcd.yaml
+      COMMON_STORAGETYPE: local
+    volumes:
+      - ./data/milvus/db:/var/lib/milvus
+      - ./data/milvus/embedEtcd.yaml:/milvus/configs/embedEtcd.yaml
+      - ./data/milvus/user.yaml:/milvus/configs/user.yaml
+    ports:
+      - 19530:19530
+      - 9091:9091
+      - 2379:2379
+    healthcheck:
+      test: ["CMD", "curl", "-f", "http://localhost:9091/healthz"]
+      interval: 30s
+      start_period: 90s
+      timeout: 20s
+      retries: 3
+    networks:
+      - modelcache
+    restart: on-failure
+    command: milvus run standalone
+
+  modelcache:
+    build:
+      context: .
+      dockerfile: Dockerfile
+    container_name: modelcache
+    image: modelcache:0.1.0
+    ports:
+      - 5000:5000
+    volumes:
+      - ./model:/home/user/model
+      - ./modelcache:/home/user/modelcache
+      - ./modelcache_mm:/home/user/modelcache_mm
+      - ./fastapi4modelcache.py:/home/user/fastapi4modelcache.py
+    networks:
+      - modelcache
+    restart: on-failure
+    command: sh -c "uvicorn fastapi4modelcache:app --reload --reload-dir /home/user --port=5000 --host=0.0.0.0"
+
+networks:
+  modelcache:
+    external: true
\ No newline at end of file
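Only the Milvus container declares a healthcheck above; the modelcache API container simply publishes port 5000. After `docker-compose up`, a small readiness probe along these lines can wait for both endpoints. The Milvus `/healthz` URL is taken from the healthcheck above, while probing FastAPI's `/docs` page is an assumption about the app served by fastapi4modelcache.py.

```python
# Wait until the services published by docker-compose.yaml answer on their host ports.
import time

import requests

ENDPOINTS = {
    "milvus": "http://127.0.0.1:9091/healthz",    # same URL the compose healthcheck curls
    "modelcache": "http://127.0.0.1:5000/docs",   # FastAPI docs page; assumed not disabled
}


def wait_ready(timeout_s: int = 180) -> None:
    deadline = time.time() + timeout_s
    pending = dict(ENDPOINTS)
    while pending and time.time() < deadline:
        for name, url in list(pending.items()):
            try:
                if requests.get(url, timeout=3).status_code < 500:
                    print(f"{name} is up at {url}")
                    del pending[name]
            except requests.RequestException:
                pass
        time.sleep(5)
    if pending:
        raise RuntimeError(f"services not ready: {', '.join(pending)}")


if __name__ == "__main__":
    wait_ready()
```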
on-failure + command: milvus run standalone + + modelcache: + build: + context: . + dockerfile: Dockerfile + container_name: modelcache + image: modelcache:0.1.0 + ports: + - 5000:5000 + volumes: + - ./model:/home/user/model + - ./modelcache:/home/user/modelcache + - ./modelcache_mm:/home/user/modelcache_mm + - ./fastapi4modelcache.py:/home/user/fastapi4modelcache.py + networks: + - modelcache + restart: on-failure + command: sh -c "uvicorn fastapi4modelcache:app --reload --reload-dir /home/user --port=5000 --host=0.0.0.0" + +networks: + modelcache: + external: true \ No newline at end of file diff --git a/modelcache/config/milvus_config.ini b/modelcache/config/milvus_config.ini index f5bd532..dee6fb6 100644 --- a/modelcache/config/milvus_config.ini +++ b/modelcache/config/milvus_config.ini @@ -1,5 +1,5 @@ [milvus] -host = '' -port = '' +host = milvus +port = 19530 user = '' password = '' \ No newline at end of file diff --git a/modelcache/config/mysql_config.ini b/modelcache/config/mysql_config.ini index 2c63f0e..2c17b81 100644 --- a/modelcache/config/mysql_config.ini +++ b/modelcache/config/mysql_config.ini @@ -1,6 +1,6 @@ [mysql] -host = '' -port = '' -username = '' -password = '' -database = '' +host = mysql +port = 3306 +username = modelcache +password = modelcache +database = modelcache diff --git a/requirements.txt b/requirements.txt index 95e1948..c09197c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -7,7 +7,7 @@ openai==0.28.1 pymilvus==2.3.1 PyMySQL==1.1.0 Requests==2.31.0 -torch==2.1.0 +torch==2.1.1 transformers==4.38.2 faiss-cpu==1.7.4 redis==5.0.1 From c91934ce28e42d5f71dab02ad15f042a260a71d5 Mon Sep 17 00:00:00 2001 From: cjn395973 <1046783533@qq.com> Date: Sun, 22 Dec 2024 12:29:56 +0800 Subject: [PATCH 82/98] update readme --- README.md | 18 ++++++++++++++++-- README_CN.md | 19 +++++++++++++++++-- 2 files changed, 33 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 6ebc5f7..aeba9cd 100644 --- a/README.md +++ b/README.md @@ -43,7 +43,7 @@ The project's startup scripts are divided into flask4modelcache.py and flask4mod pip install -r requirements.txt ``` ### Service Startup -#### Demo Service Startup +#### Method 1: Demo Service Startup 1. Download the embedding model bin file from the following address: [https://huggingface.co/shibing624/text2vec-base-chinese/tree/main](https://huggingface.co/shibing624/text2vec-base-chinese/tree/main). Place the downloaded bin file in the model/text2vec-base-chinese folder. 2. Start the backend service using the flask4modelcache_dome.py script. ```shell @@ -53,7 +53,21 @@ cd CodeFuse-ModelCache python flask4modelcache_demo.py ``` -#### Normal Service Startup +#### Method 2: Service Startup With Docker-compose +1. Download the embedding model bin file from the following address: [https://huggingface.co/shibing624/text2vec-base-chinese/tree/main](https://huggingface.co/shibing624/text2vec-base-chinese/tree/main). Place the downloaded bin file in the model/text2vec-base-chinese folder. +2. Configure docker network, only need to execute once +```shell +docker network create modelcache +``` +3. Execute the docker-compose command +```shell +# When the modelcache image does not exist locally for the first time, or when the Dockerfile is changed +docker-compose up --build + +# This is not the first run and the Dockerfile has not changed +docker-compose up +``` +#### Method 3: Service Startup Without Docker-compose Before starting the service, the following environment configurations should be performed: 1. 
Install the relational database MySQL and import the SQL file to create the data tables. The SQL file can be found at: ```reference_doc/create_table.sql``` 2. Install the vector database Milvus. diff --git a/README_CN.md b/README_CN.md index 5959380..2ea7da6 100644 --- a/README_CN.md +++ b/README_CN.md @@ -44,7 +44,7 @@ Codefuse-ModelCache 是一个开源的大模型语义缓存系统,通过缓存 pip install -r requirements.txt ``` ### 服务启动 -#### Demo服务启动 +#### 方式一:Demo服务启动 - 离线模型bin文件下载, 参考地址:[https://huggingface.co/shibing624/text2vec-base-chinese/tree/main](https://huggingface.co/shibing624/text2vec-base-chinese/tree/main),并将下载的bin文件,放到 model/text2vec-base-chinese 文件夹中。 - 执行flask4modelcache_demo.py启动服务。 ```shell @@ -54,7 +54,22 @@ cd CodeFuse-ModelCache python flask4modelcache_demo.py ``` -#### 正常服务启动 +#### 方式二:通过 docker-compose 启动服务 +- 离线模型bin文件下载, 参考地址:[https://huggingface.co/shibing624/text2vec-base-chinese/tree/main](https://huggingface.co/shibing624/text2vec-base-chinese/tree/main),并将下载的bin文件,放到 model/text2vec-base-chinese 文件夹中。 + +- 配置 docker network,只需执行一次 +```shell +docker network create modelcache +``` +- 执行 docker-compose 命令 +```shell +# 首次运行本地不存在 modelcache 镜像、或 Dockerfile 变更时 +docker-compose up --build + +# 非首次运行,且 Dockerfile 无变更 +docker-compose up +``` +#### 方式三:不通过 docker-compose 启动服务 在启动服务前,应该进行如下环境配置: 1. 安装关系数据库 mysql, 导入sql创建数据表,sql文件:```reference_doc/create_table.sql``` 2. 安装向量数据库milvus From 10c9b06dd62be0360ee0ef5ce7c436a41b8bd44c Mon Sep 17 00:00:00 2001 From: cjn395973 <1046783533@qq.com> Date: Sun, 22 Dec 2024 12:44:07 +0800 Subject: [PATCH 83/98] update readme --- README.md | 17 +++++++++++++++++ README_CN.md | 20 ++++++++++++++++++++ 2 files changed, 37 insertions(+) diff --git a/README.md b/README.md index c500d7d..4326b2f 100644 --- a/README.md +++ b/README.md @@ -24,6 +24,7 @@ ModelCache - [Dependencies](#dependencies) - [Start service](#start-service) - [Start demo](#start-demo) + - [Service Startup With Docker-compose](#service-startup-with-docker-compose) - [Start normal service](#start-normal-service) - [Visit the service](#visit-the-service) - [Write cache](#write-cache) @@ -87,7 +88,23 @@ You can find the start script in `flask4modelcache.py` and `flask4modelcache_dem ```shell python flask4modelcache_demo.py ``` +#### Service Startup With Docker-compose +1. Download the embedding model bin file from [Hugging Face](https://huggingface.co/shibing624/text2vec-base-chinese/tree/main). Place it in the `model/text2vec-base-chinese` folder. +2. Configure docker network, only need to execute once +```shell +cd CodeFuse-ModelCache +``` +```shell +docker network create modelcache +``` +3. 
Execute the docker-compose command +```shell +# When the modelcache image does not exist locally for the first time, or when the Dockerfile is changed +docker-compose up --build +# This is not the first run and the Dockerfile has not changed +docker-compose up +``` #### Start normal service Before you start standard service, do these steps: diff --git a/README_CN.md b/README_CN.md index b2e70a9..8528603 100644 --- a/README_CN.md +++ b/README_CN.md @@ -24,6 +24,7 @@ ModelCache - [环境依赖](#环境依赖) - [启动服务](#启动服务) - [启动 Demo](#启动-demo) + - [通过 docker-compose 启动服务](#通过-docker-compose-启动服务) - [启动标准服务](#启动标准服务) - [服务访问](#服务访问) - [写入 cache](#写入-cache) @@ -89,6 +90,25 @@ Codefuse-ModelCache 是一个开源的大模型语义缓存系统,通过缓存 python flask4modelcache_demo.py ``` +#### 通过 docker-compose 启动服务 +- 离线模型 bin 文件下载, 参考地址:[Hugging Face](https://huggingface.co/shibing624/text2vec-base-chinese/tree/main),并将下载的 bin 文件,放到 `model/text2vec-base-chinese` 文件夹中。 + +- 配置 docker network,只需执行一次 +```shell +cd CodeFuse-ModelCache +``` +```shell +docker network create modelcache +``` +- 执行 docker-compose 命令 +```shell +# 首次运行本地不存在 modelcache 镜像、或 Dockerfile 变更时 +docker-compose up --build + +# 非首次运行,且 Dockerfile 无变更 +docker-compose up +``` + #### 启动标准服务 在启动标准服务前,应该进行如下环境配置: From 82ba867408949bf896d1317a4d17d6c4408b84da Mon Sep 17 00:00:00 2001 From: Yuval Roth Date: Tue, 20 May 2025 16:57:59 +0300 Subject: [PATCH 84/98] made it run --- .gitignore | 2 +- docker-compose.yaml | 42 ++++++++++++++--------------- flask4modelcache.py | 15 ++++++----- model/clone_model_repository.bat | 2 ++ modelcache/adapter/adapter_query.py | 10 ++++--- modelcache/config/milvus_config.ini | 2 +- modelcache/config/mysql_config.ini | 2 +- requirements.txt | 14 +++++----- 8 files changed, 48 insertions(+), 41 deletions(-) create mode 100644 model/clone_model_repository.bat diff --git a/.gitignore b/.gitignore index 5d4a0f7..0e57011 100644 --- a/.gitignore +++ b/.gitignore @@ -142,7 +142,7 @@ dmypy.json **/multicache_serving.py **/modelcache_serving.py -**/model/ +**/model/text2vec-base-chinese /data/milvus/db /data/mysql/db \ No newline at end of file diff --git a/docker-compose.yaml b/docker-compose.yaml index e079df8..26c8456 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -1,7 +1,7 @@ -version: 'Beta' +name: "modelcache" services: mysql: - image: mysql:8.0.23 + image: mysql:9.3.0 container_name: mysql environment: MYSQL_ROOT_PASSWORD: 'root' @@ -19,7 +19,7 @@ services: - modelcache milvus: - image: milvusdb/milvus:v2.5.0-beta + image: milvusdb/milvus:v2.5.10 container_name: milvus security_opt: - seccomp:unconfined @@ -47,24 +47,24 @@ services: restart: on-failure command: milvus run standalone - modelcache: - build: - context: . - dockerfile: Dockerfile - container_name: modelcache - image: modelcache:0.1.0 - ports: - - 5000:5000 - volumes: - - ./model:/home/user/model - - ./modelcache:/home/user/modelcache - - ./modelcache_mm:/home/user/modelcache_mm - - ./fastapi4modelcache.py:/home/user/fastapi4modelcache.py - networks: - - modelcache - restart: on-failure - command: sh -c "uvicorn fastapi4modelcache:app --reload --reload-dir /home/user --port=5000 --host=0.0.0.0" +# modelcache: +# build: +# context: . 
+# dockerfile: Dockerfile +# container_name: modelcache +# image: modelcache:0.1.0 +# ports: +# - 5000:5000 +# volumes: +# - ./model:/home/user/model +# - ./modelcache:/home/user/modelcache +# - ./modelcache_mm:/home/user/modelcache_mm +# - ./fastapi4modelcache.py:/home/user/fastapi4modelcache.py +# networks: +# - modelcache +# restart: on-failure +# command: sh -c "uvicorn fastapi4modelcache:app --reload --reload-dir /home/user --port=5000 --host=0.0.0.0" networks: modelcache: - external: true \ No newline at end of file + driver: bridge \ No newline at end of file diff --git a/flask4modelcache.py b/flask4modelcache.py index ca70014..16f92cb 100644 --- a/flask4modelcache.py +++ b/flask4modelcache.py @@ -38,8 +38,8 @@ def response_hitquery(cache_resp): milvus_config = configparser.ConfigParser() milvus_config.read('modelcache/config/milvus_config.ini') -es_config = configparser.ConfigParser() -es_config.read('modelcache/config/elasticsearch_config.ini') +# es_config = configparser.ConfigParser() +# es_config.read('modelcache/config/elasticsearch_config.ini') # redis_config = configparser.ConfigParser() # redis_config.read('modelcache/config/redis_config.ini') @@ -47,7 +47,7 @@ def response_hitquery(cache_resp): # chromadb_config = configparser.ConfigParser() # chromadb_config.read('modelcache/config/chromadb_config.ini') -data_manager = get_data_manager(CacheBase("elasticsearch", config=es_config), +data_manager = get_data_manager(CacheBase("mysql", config=mysql_config), VectorBase("milvus", dimension=data2vec.dimension, milvus_config=milvus_config)) @@ -77,18 +77,19 @@ def first_flask(): # 视图函数 @app.route('/modelcache', methods=['GET', 'POST']) def user_backend(): + param_dict = [] try: if request.method == 'POST': - request_data = request.json + param_dict = request.json elif request.method == 'GET': - request_data = request.args - param_dict = json.loads(request_data) + param_dict = request.args except Exception as e: result = {"errorCode": 101, "errorDesc": str(e), "cacheHit": False, "delta_time": 0, "hit_query": '', "answer": ''} cache.data_manager.save_query_resp(result, model='', query='', delta_time=0) return json.dumps(result) + # param parsing try: request_type = param_dict.get("type") @@ -197,4 +198,4 @@ def user_backend(): if __name__ == '__main__': - app.run(host='0.0.0.0', port=5000, debug=True) + app.run(host='0.0.0.0', port=5000) diff --git a/model/clone_model_repository.bat b/model/clone_model_repository.bat new file mode 100644 index 0000000..10f2421 --- /dev/null +++ b/model/clone_model_repository.bat @@ -0,0 +1,2 @@ +git lfs install +git clone https://huggingface.co/shibing624/text2vec-base-chinese \ No newline at end of file diff --git a/modelcache/adapter/adapter_query.py b/modelcache/adapter/adapter_query.py index e3be30a..5a91807 100644 --- a/modelcache/adapter/adapter_query.py +++ b/modelcache/adapter/adapter_query.py @@ -7,7 +7,7 @@ from modelcache.processor.pre import multi_analysis from FlagEmbedding import FlagReranker -USE_RERANKER = True # 如果为 True 则启用 reranker,否则使用原有逻辑 +USE_RERANKER = False # 如果为 True 则启用 reranker,否则使用原有逻辑 def adapt_query(cache_data_convert, *args, **kwargs): chat_cache = kwargs.pop("cache_obj", cache) @@ -74,7 +74,7 @@ def adapt_query(cache_data_convert, *args, **kwargs): extra_param=context.get("evaluation_func", None), ) if rank_pre < rank_threshold: - return + return None if USE_RERANKER: reranker = FlagReranker('BAAI/bge-reranker-v2-m3', use_fp16=False) @@ -86,7 +86,7 @@ def adapt_query(cache_data_convert, *args, **kwargs): if ret is 
None: continue - rank = reranker.compute_score([pre_embedding_data, ret[0]], normalize=True) + rank = reranker.compute_score([pre_embedding_data, ret[0]], normalize=True)[0] if "deps" in context and hasattr(ret.question, "deps"): eval_query_data = { @@ -192,4 +192,6 @@ def adapt_query(cache_data_convert, *args, **kwargs): logging.info('update_hit_count except, please check!') chat_cache.report.hint_cache() - return cache_data_convert(return_message, return_query) \ No newline at end of file + return cache_data_convert(return_message, return_query) + return None + return None \ No newline at end of file diff --git a/modelcache/config/milvus_config.ini b/modelcache/config/milvus_config.ini index dee6fb6..1e9aa66 100644 --- a/modelcache/config/milvus_config.ini +++ b/modelcache/config/milvus_config.ini @@ -1,5 +1,5 @@ [milvus] -host = milvus +host = localhost port = 19530 user = '' password = '' \ No newline at end of file diff --git a/modelcache/config/mysql_config.ini b/modelcache/config/mysql_config.ini index 2c17b81..3d92385 100644 --- a/modelcache/config/mysql_config.ini +++ b/modelcache/config/mysql_config.ini @@ -1,5 +1,5 @@ [mysql] -host = mysql +host = localhost port = 3306 username = modelcache password = modelcache diff --git a/requirements.txt b/requirements.txt index 9a132bd..6e74137 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,16 +4,18 @@ Flask==3.0.0 numpy==1.24.4 onnxruntime==1.16.1 openai==0.28.1 -pymilvus==2.3.1 -PyMySQL==1.1.0 -Requests==2.31.0 -torch==2.1.1 -transformers==4.38.2 +pymilvus==2.5.9 +PyMySQL==1.1.1 +Requests==2.32.3 +torch==2.7.0 +transformers==4.44.2 faiss-cpu==1.7.4 redis==5.0.1 -modelscope==1.14.0 +modelscope==1.26.0 fastapi==0.115.5 uvicorn==0.32.0 chromadb==0.5.23 elasticsearch==7.10.0 snowflake-id==1.0.2 +flagembedding==1.3.4 +cryptography==45.0.2 \ No newline at end of file From c4f925317f3cdbe747d53b053d7af8af57d38ac1 Mon Sep 17 00:00:00 2001 From: olgaoznovich Date: Wed, 21 May 2025 17:06:57 +0300 Subject: [PATCH 85/98] Added handling for bulk insert Added a flag for normalize because the model we use already normalizes Co-authored-by: olgaoznovich Co-authored-by: Yuval-Roth --- docker-compose.yaml | 16 ++++++++-------- flask4modelcache.py | 11 ++++++----- model/.gitignore | 2 ++ model/clone_model_repository.bat | 2 -- model/download_bert_embedder.bat | 1 + modelcache/adapter/adapter_insert.py | 27 ++++++++++++++++----------- modelcache/manager/data_manager.py | 24 ++++++++++++++---------- modelcache/processor/pre.py | 2 +- requirements.txt | 4 +++- 9 files changed, 51 insertions(+), 38 deletions(-) create mode 100644 model/.gitignore delete mode 100644 model/clone_model_repository.bat create mode 100644 model/download_bert_embedder.bat diff --git a/docker-compose.yaml b/docker-compose.yaml index 26c8456..31ed1ab 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -14,7 +14,7 @@ services: - ./data/mysql/db:/var/lib/mysql - ./data/mysql/my.cnf:/etc/mysql/conf.d/my.cnf - ./data/mysql/init:/docker-entrypoint-initdb.d - restart: on-failure +# restart: on-failure networks: - modelcache @@ -36,15 +36,15 @@ services: - 19530:19530 - 9091:9091 - 2379:2379 - healthcheck: - test: ["CMD", "curl", "-f", "http://localhost:9091/healthz"] - interval: 30s - start_period: 90s - timeout: 20s - retries: 3 +# healthcheck: +# test: ["CMD", "curl", "-f", "http://localhost:9091/healthz"] +# interval: 30s +# start_period: 90s +# timeout: 20s +# retries: 3 networks: - modelcache - restart: on-failure +# restart: on-failure command: milvus run 
standalone # modelcache: diff --git a/flask4modelcache.py b/flask4modelcache.py index 16f92cb..823c66b 100644 --- a/flask4modelcache.py +++ b/flask4modelcache.py @@ -6,7 +6,7 @@ import json from modelcache import cache from modelcache.adapter import adapter -from modelcache.manager import CacheBase, VectorBase, get_data_manager +from modelcache.manager import CacheBase, VectorBase, get_data_manager, data_manager from modelcache.similarity_evaluation.distance import SearchDistanceEvaluation from modelcache.processor.pre import query_multi_splicing from modelcache.processor.pre import insert_multi_splicing @@ -30,8 +30,10 @@ def save_query_info(result, model, query, delta_time_log): def response_hitquery(cache_resp): return cache_resp['hitQuery'] - data2vec = Data2VecAudio() +embedding_func = data2vec.to_embeddings +dimension = data2vec.dimension + mysql_config = configparser.ConfigParser() mysql_config.read('modelcache/config/mysql_config.ini') @@ -48,7 +50,7 @@ def response_hitquery(cache_resp): # chromadb_config.read('modelcache/config/chromadb_config.ini') data_manager = get_data_manager(CacheBase("mysql", config=mysql_config), - VectorBase("milvus", dimension=data2vec.dimension, milvus_config=milvus_config)) + VectorBase("milvus", dimension=dimension, milvus_config=milvus_config)) # data_manager = get_data_manager(CacheBase("mysql", config=mysql_config), @@ -57,9 +59,8 @@ def response_hitquery(cache_resp): # data_manager = get_data_manager(CacheBase("mysql", config=mysql_config), # VectorBase("redis", dimension=data2vec.dimension, redis_config=redis_config)) - cache.init( - embedding_func=data2vec.to_embeddings, + embedding_func=embedding_func, data_manager=data_manager, similarity_evaluation=SearchDistanceEvaluation(), query_pre_embedding_func=query_multi_splicing, diff --git a/model/.gitignore b/model/.gitignore new file mode 100644 index 0000000..08ad955 --- /dev/null +++ b/model/.gitignore @@ -0,0 +1,2 @@ +*.tflite +text2vec-base-chinese/* \ No newline at end of file diff --git a/model/clone_model_repository.bat b/model/clone_model_repository.bat deleted file mode 100644 index 10f2421..0000000 --- a/model/clone_model_repository.bat +++ /dev/null @@ -1,2 +0,0 @@ -git lfs install -git clone https://huggingface.co/shibing624/text2vec-base-chinese \ No newline at end of file diff --git a/model/download_bert_embedder.bat b/model/download_bert_embedder.bat new file mode 100644 index 0000000..e4c26af --- /dev/null +++ b/model/download_bert_embedder.bat @@ -0,0 +1 @@ +curl -o embedder.tflite https://storage.googleapis.com/mediapipe-models/text_embedder/bert_embedder/float32/1/bert_embedder.tflite \ No newline at end of file diff --git a/modelcache/adapter/adapter_insert.py b/modelcache/adapter/adapter_insert.py index 74aa619..c2e2a29 100644 --- a/modelcache/adapter/adapter_insert.py +++ b/modelcache/adapter/adapter_insert.py @@ -14,26 +14,31 @@ def adapt_insert(*args, **kwargs): raise NotInitError() cache_enable = chat_cache.cache_enable_func(*args, **kwargs) context = kwargs.pop("cache_context", {}) - embedding_data = None - pre_embedding_data = chat_cache.insert_pre_embedding_func( - kwargs, - extra_param=context.get("pre_embedding_func", None), - prompts=chat_cache.config.prompts, - ) chat_info = kwargs.pop("chat_info", []) - llm_data = chat_info[-1]['answer'] - if cache_enable: + pre_embedding_data_list = [] + embedding_data_list = [] + llm_data_list = [] + + for row in chat_info: + pre_embedding_data = chat_cache.insert_pre_embedding_func( + row, + 
extra_param=context.get("pre_embedding_func", None), + prompts=chat_cache.config.prompts, + ) + pre_embedding_data_list.append(pre_embedding_data) + llm_data_list.append(row['answer']) embedding_data = time_cal( chat_cache.embedding_func, func_name="embedding", report_func=chat_cache.report.embedding, )(pre_embedding_data) + embedding_data_list.append(embedding_data) chat_cache.data_manager.save( - pre_embedding_data, - llm_data, - embedding_data, + pre_embedding_data_list, + llm_data_list, + embedding_data_list, model=model, extra_param=context.get("save_func", None) ) diff --git a/modelcache/manager/data_manager.py b/modelcache/manager/data_manager.py index 6734ef2..ce9ab0c 100644 --- a/modelcache/manager/data_manager.py +++ b/modelcache/manager/data_manager.py @@ -7,6 +7,9 @@ import cachetools from abc import abstractmethod, ABCMeta from typing import List, Any, Optional, Union + +from numpy import ndarray + from modelcache.manager.scalar_data.base import ( CacheStorage, CacheData, @@ -21,6 +24,7 @@ from modelcache.manager.eviction_manager import EvictionManager from modelcache.utils.log import modelcache_log +NORMALIZE = True class DataManager(metaclass=ABCMeta): """DataManager manage the cache data, including save and search""" @@ -158,9 +162,9 @@ def __init__( self.v = v self.o = o - def save(self, question, answer, embedding_data, **kwargs): + def save(self, questions: List[any], answers: List[any], embedding_datas: List[any], **kwargs): model = kwargs.pop("model", None) - self.import_data([question], [answer], [embedding_data], model) + self.import_data(questions, answers, embedding_datas, model) def save_query_resp(self, query_resp_dict, **kwargs): save_query_start_time = time.time() @@ -197,9 +201,10 @@ def import_data( raise ParamError("Make sure that all parameters have the same length") cache_datas = [] - embedding_datas = [ - normalize(embedding_data) for embedding_data in embedding_datas - ] + if NORMALIZE: + embedding_datas = [ + normalize(embedding_data) for embedding_data in embedding_datas + ] for i, embedding_data in enumerate(embedding_datas): if self.o is not None: @@ -212,11 +217,9 @@ def import_data( cache_datas.append([ans, question, embedding_data, model]) ids = self.s.batch_insert(cache_datas) + datas_ = [VectorData(id=ids[i], data=embedding_data.astype("float32")) for i, embedding_data in enumerate(embedding_datas)] self.v.mul_add( - [ - VectorData(id=ids[i], data=embedding_data) - for i, embedding_data in enumerate(embedding_datas) - ], + datas_, model ) @@ -235,7 +238,8 @@ def hit_cache_callback(self, res_data, **kwargs): def search(self, embedding_data, **kwargs): model = kwargs.pop("model", None) - embedding_data = normalize(embedding_data) + if NORMALIZE: + embedding_data = normalize(embedding_data) top_k = kwargs.get("top_k", -1) return self.v.search(data=embedding_data, top_k=top_k, model=model) diff --git a/modelcache/processor/pre.py b/modelcache/processor/pre.py index 5875294..0c72d40 100644 --- a/modelcache/processor/pre.py +++ b/modelcache/processor/pre.py @@ -64,7 +64,7 @@ def query_multi_splicing(data: Dict[str, Any], **_: Dict[str, Any]) -> Any: def insert_multi_splicing(data: Dict[str, Any], **_: Dict[str, Any]) -> Any: - insert_query_list = data.get("chat_info")[-1]['query'] + insert_query_list = data['query'] return multi_splicing(insert_query_list) diff --git a/requirements.txt b/requirements.txt index 6e74137..44d332d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -18,4 +18,6 @@ chromadb==0.5.23 elasticsearch==7.10.0 
snowflake-id==1.0.2 flagembedding==1.3.4 -cryptography==45.0.2 \ No newline at end of file +cryptography==45.0.2 +mediapipe==0.10.21 +protobuf==4.25.8 From 5127d7700773439c86d8035c08bfd20a4e600ded Mon Sep 17 00:00:00 2001 From: Yuval-Roth Date: Sat, 7 Jun 2025 12:06:33 +0300 Subject: [PATCH 86/98] Added efficiency fixes to Milvus and MySQL Milvus: started loading all the collections into the memory instead of loading the collection into memory anew every time MySQL: Added support for bulk insert to MySQL. Had to change the id generation from AUTO INCREMENT to uuids generated before insertion. This is because MySQL doesn't support bulk insert with returning all ids generated by auto increment. Co-authored-by: olgaoznovich Co-authored-by: Yuval-Roth --- data/mysql/init/init.sql | 2 +- docker-compose.yaml | 2 +- modelcache/adapter/adapter_query.py | 2 +- modelcache/manager/scalar_data/sql_storage.py | 119 +++++++++++++----- modelcache/manager/vector_data/milvus.py | 52 ++++---- reference_doc/create_table.sql | 11 +- 6 files changed, 132 insertions(+), 56 deletions(-) diff --git a/data/mysql/init/init.sql b/data/mysql/init/init.sql index 8e34c0a..a358de2 100644 --- a/data/mysql/init/init.sql +++ b/data/mysql/init/init.sql @@ -3,7 +3,7 @@ CREATE DATABASE IF NOT EXISTS `modelcache`; USE `modelcache`; CREATE TABLE IF NOT EXISTS `modelcache_llm_answer` ( - `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT comment '主键', + `id` CHAR(36) comment '主键', `gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP comment '创建时间', `gmt_modified` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP comment '修改时间', `question` text NOT NULL comment 'question', diff --git a/docker-compose.yaml b/docker-compose.yaml index 31ed1ab..06efd4c 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -1,7 +1,7 @@ name: "modelcache" services: mysql: - image: mysql:9.3.0 + image: mysql:8.0.23 container_name: mysql environment: MYSQL_ROOT_PASSWORD: 'root' diff --git a/modelcache/adapter/adapter_query.py b/modelcache/adapter/adapter_query.py index 5a91807..e9d595c 100644 --- a/modelcache/adapter/adapter_query.py +++ b/modelcache/adapter/adapter_query.py @@ -11,7 +11,7 @@ def adapt_query(cache_data_convert, *args, **kwargs): chat_cache = kwargs.pop("cache_obj", cache) - scope = kwargs.pop("scope", None) + scope = kwargs.pop("scope") model = scope['model'] if not chat_cache.has_init: raise NotInitError() diff --git a/modelcache/manager/scalar_data/sql_storage.py b/modelcache/manager/scalar_data/sql_storage.py index 52fd559..dee2a73 100644 --- a/modelcache/manager/scalar_data/sql_storage.py +++ b/modelcache/manager/scalar_data/sql_storage.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- import os import time +import uuid import pymysql import json @@ -42,26 +43,59 @@ def _insert(self, data: List): answer_type = 0 embedding_data = embedding_data.tobytes() is_deleted = 0 + _id = str(uuid.uuid4()) table_name = "modelcache_llm_answer" - insert_sql = "INSERT INTO {} (question, answer, answer_type, model, embedding_data, is_deleted) VALUES (%s, %s, %s, %s, _binary%s, %s)".format(table_name) + insert_sql = f""" + INSERT INTO {table_name} + (id, question, answer, answer_type, model, embedding_data, is_deleted) + VALUES (%s, %s, %s, %s, %s, _binary%s, %s) + """ conn = self.pool.connection() try: with conn.cursor() as cursor: # 执行插入数据操作 - values = (question, answer, answer_type, model, embedding_data, is_deleted) + values = (_id, question, answer, answer_type, model, embedding_data, is_deleted) 
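+                # `values` carries a client-generated UUID primary key (`_id` above) rather than
+                # an AUTO_INCREMENT id: MySQL cannot return every auto-generated id from a bulk
+                # executemany() insert, so ids are created before insertion for both the single
+                # and batch paths.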
cursor.execute(insert_sql, values) conn.commit() - id = cursor.lastrowid finally: # 关闭连接,将连接返回给连接池 conn.close() - return id + return _id - def batch_insert(self, all_data: List[CacheData]): + def batch_insert(self, all_data: List[List]): + table_name = "modelcache_llm_answer" + insert_sql = f""" + INSERT INTO {table_name} + (id, question, answer, answer_type, model, embedding_data, is_deleted) + VALUES (%s, %s, %s, %s, %s, %s, %s) + """ + + values_list = [] ids = [] + for data in all_data: - ids.append(self._insert(data)) + answer = data[0] + question = data[1] + embedding_data = data[2].tobytes() + model = data[3] + answer_type = 0 + is_deleted = 0 + _id = str(uuid.uuid4()) + ids.append(_id) + + values_list.append(( + _id, question, answer, answer_type, model, embedding_data, is_deleted + )) + + conn = self.pool.connection() + try: + with conn.cursor() as cursor: + cursor.executemany(insert_sql, values_list) + conn.commit() + finally: + conn.close() + return ids def insert_query_resp(self, query_resp, **kwargs): @@ -78,7 +112,11 @@ def insert_query_resp(self, query_resp, **kwargs): hit_query = json.dumps(hit_query, ensure_ascii=False) table_name = "modelcache_query_log" - insert_sql = "INSERT INTO {} (error_code, error_desc, cache_hit, model, query, delta_time, hit_query, answer) VALUES (%s, %s, %s, %s, %s, %s, %s, %s)".format(table_name) + insert_sql = f""" + INSERT INTO {table_name} + (error_code, error_desc, cache_hit, model, query, delta_time, hit_query, answer) + VALUES (%s, %s, %s, %s, %s, %s, %s, %s) + """ conn = self.pool.connection() try: with conn.cursor() as cursor: @@ -92,15 +130,16 @@ def insert_query_resp(self, query_resp, **kwargs): def get_data_by_id(self, key: int): table_name = "modelcache_llm_answer" - query_sql = "select question, answer, embedding_data, model from {} where id={}".format(table_name, key) - conn_start = time.time() + query_sql = f""" + SELECT question, answer, embedding_data, model + FROM {table_name} + WHERE id = %s + """ conn = self.pool.connection() - - search_start = time.time() try: with conn.cursor() as cursor: # 执行数据库操作 - cursor.execute(query_sql) + cursor.execute(query_sql, (key,)) resp = cursor.fetchone() finally: # 关闭连接,将连接返回给连接池 @@ -113,14 +152,18 @@ def get_data_by_id(self, key: int): def update_hit_count_by_id(self, primary_id: int): table_name = "modelcache_llm_answer" - update_sql = "UPDATE {} SET hit_count = hit_count+1 WHERE id={}".format(table_name, primary_id) + update_sql = f""" + UPDATE {table_name} + SET hit_count = hit_count+1 + WHERE id = %s + """ conn = self.pool.connection() # 使用连接执行更新数据操作 try: with conn.cursor() as cursor: # 执行更新数据操作 - cursor.execute(update_sql) + cursor.execute(update_sql,(primary_id,)) conn.commit() finally: # 关闭连接,将连接返回给连接池 @@ -129,12 +172,16 @@ def update_hit_count_by_id(self, primary_id: int): def get_ids(self, deleted=True): table_name = "modelcache_llm_answer" state = 1 if deleted else 0 - query_sql = "Select id FROM {} WHERE is_deleted = {}".format(table_name, state) + query_sql = f""" + SELECT id + FROM {table_name} + WHERE is_deleted = %s + """ conn = self.pool.connection() try: with conn.cursor() as cursor: - cursor.execute(query_sql) + cursor.execute(query_sql, (state,)) ids = [row[0] for row in cursor.fetchall()] finally: conn.close() @@ -143,37 +190,45 @@ def get_ids(self, deleted=True): def mark_deleted(self, keys): table_name = "modelcache_llm_answer" - mark_sql = " update {} set is_deleted=1 WHERE id in ({})".format(table_name, ",".join([str(i) for i in keys])) + placeholders = ",".join(["%s"] 
* len(keys)) + mark_sql = f""" + UPDATE {table_name} + SET is_deleted=1 + WHERE id in ({placeholders}) + """ - # 从连接池中获取连接 conn = self.pool.connection() try: with conn.cursor() as cursor: - # 执行删除数据操作 - cursor.execute(mark_sql) + cursor.execute(mark_sql, keys) delete_count = cursor.rowcount conn.commit() finally: - # 关闭连接,将连接返回给连接池 conn.close() return delete_count def model_deleted(self, model_name): table_name = "modelcache_llm_answer" - delete_sql = "Delete from {} WHERE model='{}'".format(table_name, model_name) + delete_sql = f""" + Delete from {table_name} + WHERE model = %s + """ table_log_name = "modelcache_query_log" - delete_log_sql = "Delete from {} WHERE model='{}'".format(table_log_name, model_name) + delete_log_sql = f""" + Delete from {table_log_name} + WHERE model = %s + """ conn = self.pool.connection() # 使用连接执行删除数据操作 try: with conn.cursor() as cursor: # 执行删除数据操作 - resp = cursor.execute(delete_sql) + resp = cursor.execute(delete_sql, (model_name,)) conn.commit() # 执行删除该模型对应日志操作 resp_log行数不返回 - resp_log = cursor.execute(delete_log_sql) + resp_log = cursor.execute(delete_log_sql, (model_name,)) conn.commit() # 分别提交事务 finally: # 关闭连接,将连接返回给连接池 @@ -182,7 +237,10 @@ def model_deleted(self, model_name): def clear_deleted_data(self): table_name = "modelcache_llm_answer" - delete_sql = "DELETE FROM {} WHERE is_deleted = 1".format(table_name) + delete_sql = f""" + DELETE FROM {table_name} + WHERE is_deleted = 1 + """ conn = self.pool.connection() try: @@ -197,10 +255,15 @@ def clear_deleted_data(self): def count(self, state: int = 0, is_all: bool = False): table_name = "modelcache_llm_answer" + + # we're not using prepared statements here, so we need to ensure state is an integer + if not isinstance(state, int): + raise ValueError("'state' must be an integer.") + if is_all: - count_sql = "SELECT COUNT(*) FROM {}".format(table_name) + count_sql = f"SELECT COUNT(*) FROM {table_name}" else: - count_sql = "SELECT COUNT(*) FROM {} WHERE is_deleted = {}".format(table_name,state) + count_sql = f"SELECT COUNT(*) FROM {table_name} WHERE is_deleted = {state}" conn = self.pool.connection() try: diff --git a/modelcache/manager/vector_data/milvus.py b/modelcache/manager/vector_data/milvus.py index 50d6ab1..b16d884 100644 --- a/modelcache/manager/vector_data/milvus.py +++ b/modelcache/manager/vector_data/milvus.py @@ -66,6 +66,8 @@ def __init__( self.search_params = ( search_params or self.SEARCH_PARAM[self.index_params["index_type"]] ) + self.collections = dict() + def _connect(self, host, port, user, password, secure): try: @@ -87,12 +89,14 @@ def _connect(self, host, port, user, password, secure): timeout=10 ) + def _create_collection(self, collection_name): if not utility.has_collection(collection_name, using=self.alias): schema = [ FieldSchema( name="id", - dtype=DataType.INT64, + dtype=DataType.VARCHAR, + max_length=36, is_primary=True, auto_id=False, ), @@ -101,7 +105,8 @@ def _create_collection(self, collection_name): ), ] schema = CollectionSchema(schema) - self.col = Collection( + + new_collection = Collection( collection_name, schema=schema, consistency_level="Session", @@ -109,46 +114,48 @@ def _create_collection(self, collection_name): ) else: modelcache_log.warning("The %s collection already exists, and it will be used directly.", collection_name) - self.col = Collection( + new_collection = Collection( collection_name, consistency_level="Session", using=self.alias ) - if len(self.col.indexes) == 0: + self.collections[collection_name] = new_collection + + if 
len(new_collection.indexes) == 0: try: modelcache_log.info("Attempting creation of Milvus index.") - self.col.create_index("embedding", index_params=self.index_params) + new_collection.create_index("embedding", index_params=self.index_params) modelcache_log.info("Creation of Milvus index successful.") except MilvusException as e: modelcache_log.warning("Error with building index: %s, and attempting creation of default index.", e) i_p = {"metric_type": "L2", "index_type": "AUTOINDEX", "params": {}} - self.col.create_index("embedding", index_params=i_p) + new_collection.create_index("embedding", index_params=i_p) self.index_params = i_p else: - self.index_params = self.col.indexes[0].to_dict()["index_param"] + self.index_params = new_collection.indexes[0].to_dict()["index_param"] + + new_collection.load() - self.col.load() def _get_collection(self, collection_name): - self.col = Collection( - collection_name, consistency_level="Session", using=self.alias - ) - self.col.load() + if collection_name not in self.collections: + self._create_collection(collection_name) + return self.collections[collection_name] def mul_add(self, datas: List[VectorData], model=None): collection_name_model = self.collection_name + '_' + model - self._create_collection(collection_name_model) - + col = self._get_collection(collection_name_model) data_array, id_array = map(list, zip(*((data.data, data.id) for data in datas))) np_data = np.array(data_array).astype("float32") entities = [id_array, np_data] - self.col.insert(entities) + col.insert(entities) + def search(self, data: np.ndarray, top_k: int = -1, model=None): if top_k == -1: top_k = self.top_k collection_name_model = self.collection_name + '_' + model - self._create_collection(collection_name_model) - search_result = self.col.search( + col = self._get_collection(collection_name_model) + search_result = col.search( data=data.reshape(1, -1).tolist(), anns_field="embedding", param=self.search_params, @@ -156,12 +163,13 @@ def search(self, data: np.ndarray, top_k: int = -1, model=None): ) return list(zip(search_result[0].distances, search_result[0].ids)) + def delete(self, ids, model=None): collection_name_model = self.collection_name + '_' + model - self._get_collection(collection_name_model) + col = self._get_collection(collection_name_model) del_ids = ",".join([str(x) for x in ids]) - resp = self.col.delete(f"id in [{del_ids}]") + resp = col.delete(f"id in [{del_ids}]") delete_count = resp.delete_count return delete_count @@ -178,10 +186,12 @@ def rebuild_col(self, model): logging.info('create_collection: {}'.format(e)) def rebuild(self, ids=None): # pylint: disable=unused-argument - self.col.compact() + for col in self.collections.values(): + col.compact() def flush(self): - self.col.flush(_async=True) + for col in self.collections.values(): + col.flush(_async=True) def close(self): self.flush() diff --git a/reference_doc/create_table.sql b/reference_doc/create_table.sql index 1d0f30a..a358de2 100644 --- a/reference_doc/create_table.sql +++ b/reference_doc/create_table.sql @@ -1,5 +1,9 @@ -CREATE TABLE `modelcache_llm_answer` ( - `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT comment '主键', +CREATE DATABASE IF NOT EXISTS `modelcache`; + +USE `modelcache`; + +CREATE TABLE IF NOT EXISTS `modelcache_llm_answer` ( + `id` CHAR(36) comment '主键', `gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP comment '创建时间', `gmt_modified` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP comment '修改时间', `question` text NOT NULL comment 
'question', @@ -12,8 +16,7 @@ CREATE TABLE `modelcache_llm_answer` ( PRIMARY KEY(`id`) ) AUTO_INCREMENT = 1 DEFAULT CHARSET = utf8mb4 COMMENT = 'cache_codegpt_answer'; - -CREATE TABLE `modelcache_query_log` ( +CREATE TABLE IF NOT EXISTS `modelcache_query_log` ( `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT comment '主键', `gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP comment '创建时间', `gmt_modified` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP comment '修改时间', From b474d156df1214ed8b090c7169f2210853044f89 Mon Sep 17 00:00:00 2001 From: Yuval Roth Date: Sun, 8 Jun 2025 00:21:55 +0300 Subject: [PATCH 87/98] Changed the embedding model to be mpnet-base-v2 added flags for its usage and updated milvus to use cosine instead of L2 for searching Co-authored-by: olgaoznovich Co-authored-by: Yuval-Roth --- flask4modelcache.py | 42 ++++++- model/download_bert_embedder.bat | 1 - modelcache/adapter/adapter_query.py | 145 ++++++++++++---------- modelcache/embedding/mpnet_base.py | 17 +++ modelcache/manager/vector_data/manager.py | 2 + requirements.txt | 3 +- 6 files changed, 137 insertions(+), 73 deletions(-) delete mode 100644 model/download_bert_embedder.bat create mode 100644 modelcache/embedding/mpnet_base.py diff --git a/flask4modelcache.py b/flask4modelcache.py index 823c66b..e30d3b3 100644 --- a/flask4modelcache.py +++ b/flask4modelcache.py @@ -6,6 +6,8 @@ import json from modelcache import cache from modelcache.adapter import adapter +from modelcache.embedding.mpnet_base import MPNet_Base +from modelcache.manager.vector_data import manager from modelcache.manager import CacheBase, VectorBase, get_data_manager, data_manager from modelcache.similarity_evaluation.distance import SearchDistanceEvaluation from modelcache.processor.pre import query_multi_splicing @@ -30,9 +32,17 @@ def save_query_info(result, model, query, delta_time_log): def response_hitquery(cache_resp): return cache_resp['hitQuery'] -data2vec = Data2VecAudio() -embedding_func = data2vec.to_embeddings -dimension = data2vec.dimension +manager.MPNet_base = True + +if manager.MPNet_base: + mpnet_base = MPNet_Base() + embedding_func = lambda x: mpnet_base.embedding_func(x) + dimension = mpnet_base.dimension + data_manager.NORMALIZE = False +else: + data2vec = Data2VecAudio() + embedding_func = data2vec.to_embeddings + dimension = data2vec.dimension mysql_config = configparser.ConfigParser() mysql_config.read('modelcache/config/mysql_config.ini') @@ -49,8 +59,30 @@ def response_hitquery(cache_resp): # chromadb_config = configparser.ConfigParser() # chromadb_config.read('modelcache/config/chromadb_config.ini') -data_manager = get_data_manager(CacheBase("mysql", config=mysql_config), - VectorBase("milvus", dimension=dimension, milvus_config=milvus_config)) +data_manager = get_data_manager( + CacheBase("mysql", config=mysql_config), + VectorBase("milvus", + dimension=dimension, + milvus_config=milvus_config, + index_params={ + "metric_type": "COSINE", + "index_type": "HNSW", + "params": {"M": 16, "efConstruction": 64}, + } if manager.MPNet_base else None, + search_params={ + "IVF_FLAT": {"metric_type": "COSINE", "params": {"nprobe": 10}}, + "IVF_SQ8": {"metric_type": "COSINE", "params": {"nprobe": 10}}, + "IVF_PQ": {"metric_type": "COSINE", "params": {"nprobe": 10}}, + "HNSW": {"metric_type": "COSINE", "params": {"ef": 10}}, + "RHNSW_FLAT": {"metric_type": "COSINE", "params": {"ef": 10}}, + "RHNSW_SQ": {"metric_type": "COSINE", "params": {"ef": 10}}, + "RHNSW_PQ": {"metric_type": "COSINE", "params": 
{"ef": 10}}, + "IVF_HNSW": {"metric_type": "COSINE", "params": {"nprobe": 10, "ef": 10}}, + "ANNOY": {"metric_type": "COSINE", "params": {"search_k": 10}}, + "AUTOINDEX": {"metric_type": "COSINE", "params": {}}, + } if manager.MPNet_base else None + ) +) # data_manager = get_data_manager(CacheBase("mysql", config=mysql_config), diff --git a/model/download_bert_embedder.bat b/model/download_bert_embedder.bat deleted file mode 100644 index e4c26af..0000000 --- a/model/download_bert_embedder.bat +++ /dev/null @@ -1 +0,0 @@ -curl -o embedder.tflite https://storage.googleapis.com/mediapipe-models/text_embedder/bert_embedder/float32/1/bert_embedder.tflite \ No newline at end of file diff --git a/modelcache/adapter/adapter_query.py b/modelcache/adapter/adapter_query.py index e9d595c..ae8e894 100644 --- a/modelcache/adapter/adapter_query.py +++ b/modelcache/adapter/adapter_query.py @@ -1,11 +1,13 @@ # -*- coding: utf-8 -*- import logging import time + from modelcache import cache from modelcache.utils.error import NotInitError from modelcache.utils.time import time_cal from modelcache.processor.pre import multi_analysis from FlagEmbedding import FlagReranker +from modelcache.manager.vector_data import manager USE_RERANKER = False # 如果为 True 则启用 reranker,否则使用原有逻辑 @@ -44,39 +46,47 @@ def adapt_query(cache_data_convert, *args, **kwargs): cache_answers = [] cache_questions = [] cache_ids = [] - similarity_threshold = chat_cache.config.similarity_threshold - similarity_threshold_long = chat_cache.config.similarity_threshold_long + cosine_similarity = cache_data_list[0][0] - min_rank, max_rank = chat_cache.similarity_evaluation.range() - rank_threshold = (max_rank - min_rank) * similarity_threshold * cache_factor - rank_threshold_long = (max_rank - min_rank) * similarity_threshold_long * cache_factor - rank_threshold = ( - max_rank - if rank_threshold > max_rank - else min_rank - if rank_threshold < min_rank - else rank_threshold - ) - rank_threshold_long = ( - max_rank - if rank_threshold_long > max_rank - else min_rank - if rank_threshold_long < min_rank - else rank_threshold_long - ) - if cache_data_list is None or len(cache_data_list) == 0: - rank_pre = -1.0 + if manager.MPNet_base: + # This code uses the built-in cosine similarity evaluation in milvus + if cosine_similarity < 0.9: + return None else: - cache_data_dict = {'search_result': cache_data_list[0]} - rank_pre = chat_cache.similarity_evaluation.evaluation( - None, - cache_data_dict, - extra_param=context.get("evaluation_func", None), + ## this is the code that uses L2 for similarity evaluation + similarity_threshold = chat_cache.config.similarity_threshold + similarity_threshold_long = chat_cache.config.similarity_threshold_long + + min_rank, max_rank = chat_cache.similarity_evaluation.range() + rank_threshold = (max_rank - min_rank) * similarity_threshold * cache_factor + rank_threshold_long = (max_rank - min_rank) * similarity_threshold_long * cache_factor + rank_threshold = ( + max_rank + if rank_threshold > max_rank + else min_rank + if rank_threshold < min_rank + else rank_threshold + ) + rank_threshold_long = ( + max_rank + if rank_threshold_long > max_rank + else min_rank + if rank_threshold_long < min_rank + else rank_threshold_long ) - if rank_pre < rank_threshold: - return None + if cache_data_list is None or len(cache_data_list) == 0: + rank_pre = -1.0 + else: + cache_data_dict = {'search_result': cache_data_list[0]} + rank_pre = chat_cache.similarity_evaluation.evaluation( + None, + cache_data_dict, + 
extra_param=context.get("evaluation_func", None), + ) + if rank_pre < rank_threshold: + return None - if USE_RERANKER: + if USE_RERANKER and not manager.MPNet_base: reranker = FlagReranker('BAAI/bge-reranker-v2-m3', use_fp16=False) for cache_data in cache_data_list: primary_id = cache_data[1] @@ -132,45 +142,50 @@ def adapt_query(cache_data_convert, *args, **kwargs): if ret is None: continue - if "deps" in context and hasattr(ret.question, "deps"): - eval_query_data = { - "question": context["deps"][0]["data"], - "embedding": None - } - eval_cache_data = { - "question": ret.question.deps[0].data, - "answer": ret.answers[0].answer, - "search_result": cache_data, - "embedding": None, - } + if manager.MPNet_base: + cache_answers.append((cosine_similarity, ret[1])) + cache_questions.append((cosine_similarity, ret[0])) + cache_ids.append((cosine_similarity, primary_id)) else: - eval_query_data = { - "question": pre_embedding_data, - "embedding": embedding_data, - } + if "deps" in context and hasattr(ret.question, "deps"): + eval_query_data = { + "question": context["deps"][0]["data"], + "embedding": None + } + eval_cache_data = { + "question": ret.question.deps[0].data, + "answer": ret.answers[0].answer, + "search_result": cache_data, + "embedding": None, + } + else: + eval_query_data = { + "question": pre_embedding_data, + "embedding": embedding_data, + } - eval_cache_data = { - "question": ret[0], - "answer": ret[1], - "search_result": cache_data, - "embedding": None - } - rank = chat_cache.similarity_evaluation.evaluation( - eval_query_data, - eval_cache_data, - extra_param=context.get("evaluation_func", None), - ) + eval_cache_data = { + "question": ret[0], + "answer": ret[1], + "search_result": cache_data, + "embedding": None + } + rank = chat_cache.similarity_evaluation.evaluation( + eval_query_data, + eval_cache_data, + extra_param=context.get("evaluation_func", None), + ) - if len(pre_embedding_data) <= 256: - if rank_threshold <= rank: - cache_answers.append((rank, ret[1])) - cache_questions.append((rank, ret[0])) - cache_ids.append((rank, primary_id)) - else: - if rank_threshold_long <= rank: - cache_answers.append((rank, ret[1])) - cache_questions.append((rank, ret[0])) - cache_ids.append((rank, primary_id)) + if len(pre_embedding_data) <= 256: + if rank_threshold <= rank: + cache_answers.append((rank, ret[1])) + cache_questions.append((rank, ret[0])) + cache_ids.append((rank, primary_id)) + else: + if rank_threshold_long <= rank: + cache_answers.append((rank, ret[1])) + cache_questions.append((rank, ret[0])) + cache_ids.append((rank, primary_id)) cache_answers = sorted(cache_answers, key=lambda x: x[0], reverse=True) cache_questions = sorted(cache_questions, key=lambda x: x[0], reverse=True) diff --git a/modelcache/embedding/mpnet_base.py b/modelcache/embedding/mpnet_base.py new file mode 100644 index 0000000..5c1b0c4 --- /dev/null +++ b/modelcache/embedding/mpnet_base.py @@ -0,0 +1,17 @@ +from sentence_transformers import SentenceTransformer + +class MPNet_Base: + def __init__(self): + self.dimension = 768 + self.model = SentenceTransformer('sentence-transformers/all-mpnet-base-v2') + + def embedding_func(self, *args, **kwargs): + if not args: + raise ValueError("No word provided for embedding.") + embeddings = self.model.encode(args) + return embeddings[0] if len(args) == 1 else embeddings + + def similarity(self, a, b): + if not a or not b: + raise ValueError("Both inputs must be non-empty for similarity calculation.") + return self.model.similarity(a, b) diff --git 
a/modelcache/manager/vector_data/manager.py b/modelcache/manager/vector_data/manager.py index 0fc5334..2167fbc 100644 --- a/modelcache/manager/vector_data/manager.py +++ b/modelcache/manager/vector_data/manager.py @@ -17,6 +17,8 @@ COLLECTION_NAME = "modelcache" +MPNet_base = False # whether to use MPNet base model for embedding, if True, will use cosine similarity evaluation in milvus + class VectorBase: """ diff --git a/requirements.txt b/requirements.txt index 44d332d..a84a988 100644 --- a/requirements.txt +++ b/requirements.txt @@ -19,5 +19,4 @@ elasticsearch==7.10.0 snowflake-id==1.0.2 flagembedding==1.3.4 cryptography==45.0.2 -mediapipe==0.10.21 -protobuf==4.25.8 +sentence-transformers==4.1.0 From 81b471573cc69249e7183c67bf246618a0c7415d Mon Sep 17 00:00:00 2001 From: Yuval Roth Date: Sun, 8 Jun 2025 14:53:05 +0300 Subject: [PATCH 88/98] Added custom pre embedding func and some other improvements Co-authored-by: olgaoznovich Co-authored-by: Yuval-Roth --- flask4modelcache.py | 13 ++++++++----- modelcache/embedding/mpnet_base.py | 2 +- modelcache/processor/pre.py | 7 ++++++- 3 files changed, 15 insertions(+), 7 deletions(-) diff --git a/flask4modelcache.py b/flask4modelcache.py index e30d3b3..ff56157 100644 --- a/flask4modelcache.py +++ b/flask4modelcache.py @@ -10,8 +10,7 @@ from modelcache.manager.vector_data import manager from modelcache.manager import CacheBase, VectorBase, get_data_manager, data_manager from modelcache.similarity_evaluation.distance import SearchDistanceEvaluation -from modelcache.processor.pre import query_multi_splicing -from modelcache.processor.pre import insert_multi_splicing +from modelcache.processor.pre import query_multi_splicing,insert_multi_splicing, query_with_role from concurrent.futures import ThreadPoolExecutor from modelcache.utils.model_filter import model_blacklist_filter from modelcache.embedding import Data2VecAudio @@ -36,13 +35,17 @@ def response_hitquery(cache_resp): if manager.MPNet_base: mpnet_base = MPNet_Base() - embedding_func = lambda x: mpnet_base.embedding_func(x) + embedding_func = mpnet_base.to_embeddings dimension = mpnet_base.dimension data_manager.NORMALIZE = False + query_pre_embedding_func=query_with_role + insert_pre_embedding_func=query_with_role else: data2vec = Data2VecAudio() embedding_func = data2vec.to_embeddings dimension = data2vec.dimension + query_pre_embedding_func=query_multi_splicing + insert_pre_embedding_func=insert_multi_splicing mysql_config = configparser.ConfigParser() mysql_config.read('modelcache/config/mysql_config.ini') @@ -95,8 +98,8 @@ def response_hitquery(cache_resp): embedding_func=embedding_func, data_manager=data_manager, similarity_evaluation=SearchDistanceEvaluation(), - query_pre_embedding_func=query_multi_splicing, - insert_pre_embedding_func=insert_multi_splicing, + query_pre_embedding_func=query_pre_embedding_func, + insert_pre_embedding_func=insert_pre_embedding_func, ) global executor diff --git a/modelcache/embedding/mpnet_base.py b/modelcache/embedding/mpnet_base.py index 5c1b0c4..a106948 100644 --- a/modelcache/embedding/mpnet_base.py +++ b/modelcache/embedding/mpnet_base.py @@ -5,7 +5,7 @@ def __init__(self): self.dimension = 768 self.model = SentenceTransformer('sentence-transformers/all-mpnet-base-v2') - def embedding_func(self, *args, **kwargs): + def to_embeddings(self, *args, **kwargs): if not args: raise ValueError("No word provided for embedding.") embeddings = self.model.encode(args) diff --git a/modelcache/processor/pre.py b/modelcache/processor/pre.py index 
0c72d40..6a9d0f6 100644 --- a/modelcache/processor/pre.py +++ b/modelcache/processor/pre.py @@ -4,7 +4,7 @@ def insert_last_content(data: Dict[str, Any], **_: Dict[str, Any]) -> Any: - return data.get("chat_info")[-1]["query"] + return data.get("query")[-1]["content"] def query_last_content(data: Dict[str, Any], **_: Dict[str, Any]) -> Any: @@ -67,6 +67,11 @@ def insert_multi_splicing(data: Dict[str, Any], **_: Dict[str, Any]) -> Any: insert_query_list = data['query'] return multi_splicing(insert_query_list) +def query_with_role(data: Dict[str, Any], **_: Dict[str, Any]) -> Any: + query = data["query"][-1] + content = query["content"] + role = query["role"] + return role+": "+content def multi_splicing(data_list) -> Any: result_str = "" From bd2caeb389fd5321f0b44bece2304591d9fa3fc9 Mon Sep 17 00:00:00 2001 From: adiaybgu Date: Tue, 10 Jun 2025 07:11:17 +0300 Subject: [PATCH 89/98] Added feature: Eviction logic and new memory cache types. connected the memory caching logic to the main logic because it was not connected up until now Co-authored-by: omerdor001 Co-authored-by: adiaybgu --- flask4modelcache.py | 4 +- modelcache/adapter/adapter.py | 1 + modelcache/adapter/adapter_query.py | 24 ++-- modelcache/embedding/mpnet_base.py | 2 +- modelcache/manager/data_manager.py | 69 +++++++-- modelcache/manager/eviction/arc_cache.py | 131 ++++++++++++++++++ modelcache/manager/eviction/base.py | 4 +- modelcache/manager/eviction/memory_cache.py | 52 +++++-- modelcache/manager/eviction/wtinylfu_cache.py | 129 +++++++++++++++++ modelcache/manager/factory.py | 6 +- modelcache/manager/scalar_data/base.py | 5 + modelcache/manager/scalar_data/sql_storage.py | 2 +- modelcache/manager/vector_data/milvus.py | 2 +- 13 files changed, 385 insertions(+), 46 deletions(-) create mode 100644 modelcache/manager/eviction/arc_cache.py create mode 100644 modelcache/manager/eviction/wtinylfu_cache.py diff --git a/flask4modelcache.py b/flask4modelcache.py index ff56157..d793953 100644 --- a/flask4modelcache.py +++ b/flask4modelcache.py @@ -84,7 +84,9 @@ def response_hitquery(cache_resp): "ANNOY": {"metric_type": "COSINE", "params": {"search_k": 10}}, "AUTOINDEX": {"metric_type": "COSINE", "params": {}}, } if manager.MPNet_base else None - ) + ), + eviction='WTINYLFU', + max_size=100000 ) diff --git a/modelcache/adapter/adapter.py b/modelcache/adapter/adapter.py index 452be7b..ddad9af 100644 --- a/modelcache/adapter/adapter.py +++ b/modelcache/adapter/adapter.py @@ -30,6 +30,7 @@ def create_insert(cls, *args, **kwargs): **kwargs ) except Exception as e: + print(e) return str(e) @classmethod diff --git a/modelcache/adapter/adapter_query.py b/modelcache/adapter/adapter_query.py index ae8e894..aa42eae 100644 --- a/modelcache/adapter/adapter_query.py +++ b/modelcache/adapter/adapter_query.py @@ -91,7 +91,7 @@ def adapt_query(cache_data_convert, *args, **kwargs): for cache_data in cache_data_list: primary_id = cache_data[1] ret = chat_cache.data_manager.get_scalar_data( - cache_data, extra_param=context.get("get_scalar_data", None) + cache_data, extra_param=context.get("get_scalar_data", None),model=model ) if ret is None: continue @@ -124,27 +124,27 @@ def adapt_query(cache_data_convert, *args, **kwargs): if len(pre_embedding_data) <= 256: if rank_threshold <= rank: - cache_answers.append((rank, ret[1])) - cache_questions.append((rank, ret[0])) + cache_answers.append((rank, ret[0])) + cache_questions.append((rank, ret[1])) cache_ids.append((rank, primary_id)) else: if rank_threshold_long <= rank: - cache_answers.append((rank, 
ret[1])) - cache_questions.append((rank, ret[0])) + cache_answers.append((rank, ret[0])) + cache_questions.append((rank, ret[1])) cache_ids.append((rank, primary_id)) else: # 不使用 reranker 时,走原来的逻辑 for cache_data in cache_data_list: primary_id = cache_data[1] ret = chat_cache.data_manager.get_scalar_data( - cache_data, extra_param=context.get("get_scalar_data", None) + cache_data, extra_param=context.get("get_scalar_data", None),model=model ) if ret is None: continue if manager.MPNet_base: - cache_answers.append((cosine_similarity, ret[1])) - cache_questions.append((cosine_similarity, ret[0])) + cache_answers.append((cosine_similarity, ret[0])) + cache_questions.append((cosine_similarity, ret[1])) cache_ids.append((cosine_similarity, primary_id)) else: if "deps" in context and hasattr(ret.question, "deps"): @@ -178,13 +178,13 @@ def adapt_query(cache_data_convert, *args, **kwargs): if len(pre_embedding_data) <= 256: if rank_threshold <= rank: - cache_answers.append((rank, ret[1])) - cache_questions.append((rank, ret[0])) + cache_answers.append((rank, ret[0])) + cache_questions.append((rank, ret[1])) cache_ids.append((rank, primary_id)) else: if rank_threshold_long <= rank: - cache_answers.append((rank, ret[1])) - cache_questions.append((rank, ret[0])) + cache_answers.append((rank, ret[0])) + cache_questions.append((rank, ret[1])) cache_ids.append((rank, primary_id)) cache_answers = sorted(cache_answers, key=lambda x: x[0], reverse=True) diff --git a/modelcache/embedding/mpnet_base.py b/modelcache/embedding/mpnet_base.py index a106948..b272c5c 100644 --- a/modelcache/embedding/mpnet_base.py +++ b/modelcache/embedding/mpnet_base.py @@ -7,7 +7,7 @@ def __init__(self): def to_embeddings(self, *args, **kwargs): if not args: - raise ValueError("No word provided for embedding.") + raise ValueError("No data provided for embedding.") embeddings = self.model.encode(args) return embeddings[0] if len(args) == 1 else embeddings diff --git a/modelcache/manager/data_manager.py b/modelcache/manager/data_manager.py index ce9ab0c..3b20fd3 100644 --- a/modelcache/manager/data_manager.py +++ b/modelcache/manager/data_manager.py @@ -22,6 +22,7 @@ from modelcache.manager.object_data.base import ObjectBase from modelcache.manager.eviction import EvictionBase from modelcache.manager.eviction_manager import EvictionManager +from modelcache.manager.eviction.memory_cache import MemoryCacheEviction from modelcache.utils.log import modelcache_log NORMALIZE = True @@ -38,9 +39,7 @@ def save_query_resp(self, query_resp_dict, **kwargs): pass @abstractmethod - def import_data( - self, questions: List[Any], answers: List[Any], embedding_datas: List[Any], model:Any - ): + def import_data(self, questions: List[Any], answers: List[Any], embedding_datas: List[Any], model:Any): pass @abstractmethod @@ -162,10 +161,18 @@ def __init__( self.v = v self.o = o + # added + self.eviction_base = MemoryCacheEviction( + policy=policy, + maxsize=max_size, + clean_size=clean_size, + on_evict=self._evict_ids) + def save(self, questions: List[any], answers: List[any], embedding_datas: List[any], **kwargs): model = kwargs.pop("model", None) self.import_data(questions, answers, embedding_datas, model) + def save_query_resp(self, query_resp_dict, **kwargs): save_query_start_time = time.time() self.s.insert_query_resp(query_resp_dict, **kwargs) @@ -217,14 +224,20 @@ def import_data( cache_datas.append([ans, question, embedding_data, model]) ids = self.s.batch_insert(cache_datas) - datas_ = [VectorData(id=ids[i], 
data=embedding_data.astype("float32")) for i, embedding_data in enumerate(embedding_datas)] - self.v.mul_add( - datas_, - model - - ) + datas = [] + for i, embedding_data in enumerate(embedding_datas): + _id = ids[i] + datas.append(VectorData(id=_id, data=embedding_data.astype("float32"))) + self.eviction_base.put([(_id, cache_datas[i])],model=model) + self.v.mul_add(datas,model) def get_scalar_data(self, res_data, **kwargs) -> Optional[CacheData]: + model = kwargs.pop("model") + #Get Data from RAM Cache + _id = res_data[1] + cache_hit = self.eviction_base.get(_id, model=model) + if cache_hit is not None: + return cache_hit cache_data = self.s.get_data_by_id(res_data[1]) if cache_data is None: return None @@ -244,8 +257,10 @@ def search(self, embedding_data, **kwargs): return self.v.search(data=embedding_data, top_k=top_k, model=model) def delete(self, id_list, **kwargs): - model = kwargs.pop("model", None) + model = kwargs.pop("model") try: + for id in id_list: + self.eviction_base.get_cache(model).pop(id, None) # Remove from in-memory LRU too v_delete_count = self.v.delete(ids=id_list, model=model) except Exception as e: return {'status': 'failed', 'milvus': 'delete milvus data failed, please check! e: {}'.format(e), @@ -262,10 +277,13 @@ def delete(self, id_list, **kwargs): def create_index(self, model, **kwargs): return self.v.create(model) - def truncate(self, model_name): + def truncate(self, model): + # drop memory cache data + self.eviction_base.clear(model) + # drop vector base data try: - vector_resp = self.v.rebuild_col(model_name) + vector_resp = self.v.rebuild_col(model) except Exception as e: return {'status': 'failed', 'VectorDB': 'truncate VectorDB data failed, please check! e: {}'.format(e), 'ScalarDB': 'unexecuted'} @@ -273,12 +291,37 @@ def truncate(self, model_name): return {'status': 'failed', 'VectorDB': vector_resp, 'ScalarDB': 'unexecuted'} # drop scalar base data try: - delete_count = self.s.model_deleted(model_name) + delete_count = self.s.model_deleted(model) except Exception as e: return {'status': 'failed', 'VectorDB': 'rebuild', 'ScalarDB': 'truncate scalar data failed, please check! e: {}'.format(e)} return {'status': 'success', 'VectorDB': 'rebuild', 'ScalarDB': 'delete_count: ' + str(delete_count)} + # added + def _evict_ids(self, ids, **kwargs): + model = kwargs.get("model") + if not ids or any(i is None for i in ids): + modelcache_log.warning("Skipping eviction for invalid IDs: %s", ids) + return + + if isinstance(ids,str): + ids = [ids] + + for _id in ids: + self.eviction_base.get_cache(model).pop(_id, None) + + try: + self.s.mark_deleted(ids) + modelcache_log.info("Evicted from scalar storage: %s", ids) + except Exception as e: + modelcache_log.error("Failed to delete from scalar storage: %s", str(e)) + + try: + self.v.delete(ids, model=model) + modelcache_log.info("Evicted from vector storage (model=%s): %s", model, ids) + except Exception as e: + modelcache_log.error("Failed to delete from vector storage (model=%s): %s", model, str(e)) + def flush(self): self.s.flush() self.v.flush() diff --git a/modelcache/manager/eviction/arc_cache.py b/modelcache/manager/eviction/arc_cache.py new file mode 100644 index 0000000..509fae0 --- /dev/null +++ b/modelcache/manager/eviction/arc_cache.py @@ -0,0 +1,131 @@ +from cachetools import Cache +from collections import OrderedDict + +class ARC(Cache): + """ + Adaptive Replacement Cache (ARC) implementation with on_evict callback. 
+ Balances recency and frequency via two active lists (T1, T2) and two ghost lists (B1, B2). + Calls on_evict([key]) whenever an item is evicted from the active cache. + """ + + def __init__(self, maxsize, getsizeof=None, on_evict=None): + """ + Args: + maxsize (int): Maximum cache size. + getsizeof (callable, optional): Sizing function for items. + on_evict (callable, optional): Callback called as on_evict([key]) when a key is evicted. + """ + super().__init__(maxsize, getsizeof) + self.t1 = OrderedDict() + self.t2 = OrderedDict() + self.b1 = OrderedDict() + self.b2 = OrderedDict() + self.p = 0 # Adaptive target for T1 size. + self.on_evict = on_evict + + def __len__(self): + return len(self.t1) + len(self.t2) + + def __contains__(self, key): + return key in self.t1 or key in self.t2 + + def _evict_internal(self): + """ + Evicts items from T1 or T2 if cache is over capacity, and prunes ghost lists. + Calls on_evict for each evicted key. + """ + # Evict from T1 or T2 if active cache > maxsize + while len(self.t1) + len(self.t2) > self.maxsize: + if len(self.t1) > self.p or (len(self.t1) == 0 and len(self.t2) > 0): + key, value = self.t1.popitem(last=False) + self.b1[key] = value + if self.on_evict: + self.on_evict([key]) + else: + key, value = self.t2.popitem(last=False) + self.b2[key] = value + if self.on_evict: + self.on_evict([key]) + # Prune ghost lists to their max lengths + while len(self.b1) > (self.maxsize - self.p): + self.b1.popitem(last=False) + while len(self.b2) > self.p: + self.b2.popitem(last=False) + + def __setitem__(self, key, value): + # Remove from all lists before re-inserting + for l in (self.t1, self.t2, self.b1, self.b2): + l.pop(key, None) + self.t1[key] = value + self.t1.move_to_end(key) + self._evict_internal() + + def __getitem__(self, key): + # Case 1: Hit in T1 → promote to T2 + if key in self.t1: + value = self.t1.pop(key) + self.t2[key] = value + self.t2.move_to_end(key) + self.p = max(0, self.p - 1) + self._evict_internal() + return value + # Case 2: Hit in T2 → refresh in T2 + if key in self.t2: + value = self.t2.pop(key) + self.t2[key] = value + self.t2.move_to_end(key) + self.p = min(self.maxsize, self.p + 1) + self._evict_internal() + return value + # Case 3: Hit in B1 (ghost) → fetch and promote to T2 + if key in self.b1: + self.b1.pop(key) + self.p = min(self.maxsize, self.p + 1) + self._evict_internal() + value = super().__missing__(key) + self.t2[key] = value + self.t2.move_to_end(key) + return value + # Case 4: Hit in B2 (ghost) → fetch and promote to T2 + if key in self.b2: + self.b2.pop(key) + self.p = max(0, self.p - 1) + self._evict_internal() + value = super().__missing__(key) + self.t2[key] = value + self.t2.move_to_end(key) + return value + # Case 5: Cold miss → handled by Cache base class (calls __setitem__ after __missing__) + return super().__getitem__(key) + + def __missing__(self, key): + """ + Override this in a subclass, or rely on direct assignment (cache[key] = value). + """ + raise KeyError(key) + + def pop(self, key, default=None): + """ + Remove key from all lists. 
+ """ + for l in (self.t1, self.t2, self.b1, self.b2): + if key in l: + return l.pop(key) + return default + + def clear(self): + self.t1.clear() + self.t2.clear() + self.b1.clear() + self.b2.clear() + self.p = 0 + super().clear() + + def __iter__(self): + yield from self.t1 + yield from self.t2 + + def __repr__(self): + return (f"ARC(maxsize={self.maxsize}, p={self.p}, len={len(self)}, " + f"t1_len={len(self.t1)}, t2_len={len(self.t2)}, " + f"b1_len={len(self.b1)}, b2_len={len(self.b2)})") diff --git a/modelcache/manager/eviction/base.py b/modelcache/manager/eviction/base.py index 352c9a3..56efa54 100644 --- a/modelcache/manager/eviction/base.py +++ b/modelcache/manager/eviction/base.py @@ -9,11 +9,11 @@ class EvictionBase(metaclass=ABCMeta): """ @abstractmethod - def put(self, objs: List[Any]): + def put(self, objs: List[Any], model:str): pass @abstractmethod - def get(self, obj: Any): + def get(self, obj: Any, model:str): pass @property diff --git a/modelcache/manager/eviction/memory_cache.py b/modelcache/manager/eviction/memory_cache.py index 3cb487f..c6272f6 100644 --- a/modelcache/manager/eviction/memory_cache.py +++ b/modelcache/manager/eviction/memory_cache.py @@ -1,8 +1,10 @@ # -*- coding: utf-8 -*- -from typing import Any, Callable, List +from typing import Any, Callable, List, Tuple import cachetools from modelcache.manager.eviction.base import EvictionBase +from .arc_cache import ARC +from .wtinylfu_cache import W2TinyLFU def popitem_wrapper(func, wrapper_func, clean_size): @@ -19,25 +21,51 @@ def wrapper(*args, **kwargs): class MemoryCacheEviction(EvictionBase): def __init__(self, policy: str, maxsize: int, clean_size: int, on_evict: Callable[[List[Any]], None], **kwargs): self._policy = policy.upper() + self.model_to_cache = dict() + self.maxsize = maxsize + self.clean_size = clean_size + self.on_evict = on_evict + self.kwargs = kwargs + + def create_cache(self, model: str): if self._policy == "LRU": - self._cache = cachetools.LRUCache(maxsize=maxsize, **kwargs) + cache = cachetools.LRUCache(maxsize=self.maxsize, **self.kwargs) elif self._policy == "LFU": - self._cache = cachetools.LFUCache(maxsize=maxsize, **kwargs) + cache = cachetools.LFUCache(maxsize=self.maxsize, **self.kwargs) elif self._policy == "FIFO": - self._cache = cachetools.FIFOCache(maxsize=maxsize, **kwargs) + cache = cachetools.FIFOCache(maxsize=self.maxsize, **self.kwargs) elif self._policy == "RR": - self._cache = cachetools.RRCache(maxsize=maxsize, **kwargs) + cache = cachetools.RRCache(maxsize=self.maxsize, **self.kwargs) + elif self._policy == "WTINYLFU": + cache = W2TinyLFU(maxsize=self.maxsize, on_evict=lambda x: self.on_evict(x,model=model)) + elif self._policy == "ARC": + cache = ARC(maxsize=self.maxsize, on_evict=lambda x: self.on_evict(x,model=model)) else: - raise ValueError(f"Unknown policy {policy}") + raise ValueError(f"Unknown policy {self.policy}") + cache.popitem = popitem_wrapper(cache.popitem, self.on_evict, self.clean_size) + return cache + + + def put(self, objs: List[Tuple[Any, Any]], model: str): + cache = self.get_cache(model) + for key, value in objs: + cache[key] = value + + + def get(self, obj: Any, model: str): + cache = self.get_cache(model) + return cache.get(obj) + + + def clear(self, model: str): + self.model_to_cache.pop(model, None) - self._cache.popitem = popitem_wrapper(self._cache.popitem, on_evict, clean_size) - def put(self, objs: List[Any]): - for obj in objs: - self._cache[obj] = True + def get_cache(self, model: str): + if not model in self.model_to_cache: + 
self.model_to_cache[model] = self.create_cache(model) + return self.model_to_cache[model] - def get(self, obj: Any): - return self._cache.get(obj) @property def policy(self) -> str: diff --git a/modelcache/manager/eviction/wtinylfu_cache.py b/modelcache/manager/eviction/wtinylfu_cache.py new file mode 100644 index 0000000..d7f4272 --- /dev/null +++ b/modelcache/manager/eviction/wtinylfu_cache.py @@ -0,0 +1,129 @@ +from cachetools import LRUCache, Cache +import random +from typing import Any, Callable + +class CountMinSketch: + def __init__(self, width=1024, depth=4, decay_interval=10000): + self.width = width + self.depth = depth + self.tables = [[0]*width for _ in range(depth)] + self.seeds = [random.randrange(1<<30) for _ in range(depth)] + self.ops = 0 + self.decay_interval = decay_interval + + def _hash(self, x, seed): + return hash((x, seed)) % self.width + + def add(self, x): + self.ops += 1 + # minimal increment + est = self.estimate(x) + for i, seed in enumerate(self.seeds): + idx = self._hash(x, seed) + if self.tables[i][idx] <= est: + self.tables[i][idx] += 1 + if self.ops >= self.decay_interval: + self.decay() + self.ops = 0 + + def estimate(self, x): + return min(self.tables[i][self._hash(x, seed)] + for i, seed in enumerate(self.seeds)) + + def decay(self): + for table in self.tables: + for i in range(len(table)): + table[i] >>= 1 + + +class W2TinyLFU(Cache): + def __init__(self, maxsize, window_pct=1, on_evict: Callable[[Any], None]=None): + super().__init__(maxsize) + self.window_size = max(1, int(maxsize * window_pct / 100)) + rest = maxsize - self.window_size + self.probation_size = rest // 2 + self.protected_size = rest - self.probation_size + + self.window = LRUCache(maxsize=self.window_size) + self.probation = LRUCache(maxsize=self.probation_size) + self.protected = LRUCache(maxsize=self.protected_size) + + self.cms = CountMinSketch() + self.on_evict = on_evict + self.data = {} + + def __setitem__(self, key, value): + self.data[key] = value + self._put(key) + + def __getitem__(self, key): + val = self.get(key, default=None) + if val is None: + raise KeyError(key) + return val + + def __contains__(self, key): + return key in self.window or key in self.probation or key in self.protected + + def __delitem__(self, key): + self.data.pop(key, None) + self.window.pop(key, None) + self.probation.pop(key, None) + self.protected.pop(key, None) + + def get(self, key, default=None): + if key in self.window: + self.window[key] = True + return self.data.get(key, default) + if key in self.protected: + self.protected[key] = True + return self.data.get(key, default) + if key in self.probation: + self.probation.pop(key) + # demote LRU from protected if full + if len(self.protected) >= self.protected_size: + demoted = next(iter(self.protected)) + self.protected.pop(demoted) + self.probation[demoted] = True + self.protected[key] = True + return self.data.get(key, default) + return default + + def _put(self, key): + self.cms.add(key) + if key in self: + return + + # admission to window + if len(self.window) < self.window_size: + self.window[key] = True + return + + # window full: victim is LRU + victim = next(iter(self.window)) + self.window.pop(victim) + + if self.cms.estimate(key) >= self.cms.estimate(victim): + self._admit_to_main(victim) + self._admit_to_main(key) + else: + # victim stronger or equal: victim enters main, key is dropped + self._admit_to_main(victim) + # actually evicts key entirely + if self.on_evict: + self.on_evict(key) + self.data.pop(key, None) + + def 
_admit_to_main(self, key): + if key in self.protected or key in self.probation: + return + if len(self.probation) < self.probation_size: + self.probation[key] = True + else: + evicted = next(iter(self.probation)) + self.probation.pop(evicted) + self.probation[key] = True + # this eviction removes it entirely + if self.on_evict: + self.on_evict(evicted) + self.data.pop(evicted, None) diff --git a/modelcache/manager/factory.py b/modelcache/manager/factory.py index 7246cc6..b6d5b08 100644 --- a/modelcache/manager/factory.py +++ b/modelcache/manager/factory.py @@ -8,9 +8,9 @@ def get_data_manager( cache_base: Union[CacheBase, str] = None, vector_base: Union[VectorBase, str] = None, object_base: Union[ObjectBase, str] = None, - max_size: int = 1000, - clean_size: int = None, - eviction: str = "LRU", + max_size: int = 3, + clean_size: int = 1, + eviction: str = "ARC", data_path: str = "data_map.txt", get_data_container: Callable = None, ): diff --git a/modelcache/manager/scalar_data/base.py b/modelcache/manager/scalar_data/base.py index 6db058d..607db95 100644 --- a/modelcache/manager/scalar_data/base.py +++ b/modelcache/manager/scalar_data/base.py @@ -128,3 +128,8 @@ def flush(self): @abstractmethod def close(self): pass + + @abstractmethod + def batch_insert(self, all_data: List[CacheData]): + pass + diff --git a/modelcache/manager/scalar_data/sql_storage.py b/modelcache/manager/scalar_data/sql_storage.py index dee2a73..c339285 100644 --- a/modelcache/manager/scalar_data/sql_storage.py +++ b/modelcache/manager/scalar_data/sql_storage.py @@ -131,7 +131,7 @@ def insert_query_resp(self, query_resp, **kwargs): def get_data_by_id(self, key: int): table_name = "modelcache_llm_answer" query_sql = f""" - SELECT question, answer, embedding_data, model + SELECT answer, question, embedding_data, model FROM {table_name} WHERE id = %s """ diff --git a/modelcache/manager/vector_data/milvus.py b/modelcache/manager/vector_data/milvus.py index b16d884..1d04430 100644 --- a/modelcache/manager/vector_data/milvus.py +++ b/modelcache/manager/vector_data/milvus.py @@ -168,7 +168,7 @@ def delete(self, ids, model=None): collection_name_model = self.collection_name + '_' + model col = self._get_collection(collection_name_model) - del_ids = ",".join([str(x) for x in ids]) + del_ids = ",".join([f'"{x}"' for x in ids]) resp = col.delete(f"id in [{del_ids}]") delete_count = resp.delete_count return delete_count From f8d4e72d8d9ad179ec2b0e0f143789d991a4837f Mon Sep 17 00:00:00 2001 From: Yuval Roth Date: Tue, 10 Jun 2025 01:31:11 +0300 Subject: [PATCH 90/98] Project-wide refactoring, created a single point of init and entry to caching logic * Separated API logic from cache logic and init logic and by that deduplicated all the cache init code and the parsing and handling logic that was inside the files 'fastpi4modelcache.py', 'fastpi4modelcache_demo.py', 'flask4modelcache.py' and 'flask4modelcache_demo.py' moved all that logic into Cache class. * The cache init code is now modular and extendable and is contained inside the static Cache.init() function. Added EmbeddingModel and MetricType enums that control which logic runs in the adapters and is configured in the init code * All the cache configuration is now held inside the Cache object instead of being spread around the entire project * Deduplicated classes, interfaces and factory methods. Removed some unused dead code. 
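
The eviction layer introduced in this patch keeps one in-memory cache per model and reports evicted ids through an on_evict callback, which the data manager then uses to purge the scalar and vector stores. Below is a minimal, self-contained sketch of that flow; it is not part of the patch and only assumes the MemoryCacheEviction class and policy names shown above, while the keys, values, model names and the print-based callback are illustrative placeholders.

    from modelcache.manager.eviction.memory_cache import MemoryCacheEviction

    def on_evict(ids, **kwargs):
        # W2TinyLFU reports a single key, ARC reports a list; normalize both forms.
        ids = ids if isinstance(ids, (list, tuple)) else [ids]
        print("evicted:", ids, "model:", kwargs.get("model"))

    eviction = MemoryCacheEviction(policy="WTINYLFU", maxsize=3, clean_size=1, on_evict=on_evict)

    # Each model name gets its own cache, created lazily on first access.
    eviction.put([(1, "answer-1"), (2, "answer-2"), (3, "answer-3")], model="model_a")
    eviction.put([(4, "answer-4")], model="model_a")   # tiny maxsize, so admissions trigger evictions
    print(eviction.get(4, model="model_a"))            # hit in model_a's cache, or None if it was evicted
    print(eviction.get(4, model="model_b"))            # model_b has its own cache -> None
    eviction.clear(model="model_a")                    # drops model_a's in-memory cache entirely

In DataManager._evict_ids the same callback additionally calls s.mark_deleted and v.delete, so the in-memory, scalar and vector layers stay in sync when entries age out.
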
* Moved factory methods into their respective Interface instead of being static global functions with arbitrary names --- fastapi4modelcache.py | 144 +------- fastapi4modelcache_demo.py | 121 +------ flask4modelcache.py | 217 +----------- flask4modelcache_demo.py | 151 +-------- modelcache/__init__.py | 10 +- modelcache/adapter/adapter.py | 4 +- modelcache/adapter/adapter_insert.py | 9 +- modelcache/adapter/adapter_query.py | 299 ++++++++--------- modelcache/adapter/adapter_register.py | 3 +- modelcache/adapter/adapter_remove.py | 5 +- modelcache/cache.py | 311 ++++++++++++++++++ modelcache/config.py | 22 -- modelcache/core.py | 76 ----- modelcache/embedding/__init__.py | 40 +-- modelcache/embedding/base.py | 85 ++++- modelcache/embedding/huggingface.py | 48 +-- modelcache/embedding/mpnet_base.py | 17 - modelcache/embedding/string_text.py | 5 - modelcache/manager/__init__.py | 4 - modelcache/manager/data_manager.py | 58 ++-- modelcache/manager/eviction/__init__.py | 8 - modelcache/manager/eviction/manager.py | 25 -- modelcache/manager/eviction/memory_cache.py | 4 + modelcache/manager/factory.py | 27 -- modelcache/manager/object_data/__init__.py | 7 - modelcache/manager/object_data/base.py | 8 +- modelcache/manager/scalar_data/__init__.py | 8 - modelcache/manager/scalar_data/base.py | 30 +- modelcache/manager/scalar_data/manager.py | 36 -- modelcache/manager/scalar_data/sql_storage.py | 4 + .../manager/scalar_data/sql_storage_es.py | 4 + .../manager/scalar_data/sql_storage_sqlite.py | 3 + modelcache/manager/vector_data/__init__.py | 9 - modelcache/manager/vector_data/base.py | 115 ++++++- modelcache/manager/vector_data/chroma.py | 4 +- modelcache/manager/vector_data/faiss.py | 4 +- modelcache/manager/vector_data/manager.py | 127 ------- modelcache/manager/vector_data/milvus.py | 42 +-- modelcache/manager/vector_data/redis.py | 4 +- modelcache/similarity_evaluation/__init__.py | 9 - .../{similarity_evaluation.py => base.py} | 0 modelcache/similarity_evaluation/distance.py | 3 +- .../similarity_evaluation/exact_match.py | 2 +- modelcache/utils/cache_func.py | 3 - modelcache/utils/time.py | 9 +- 45 files changed, 831 insertions(+), 1293 deletions(-) create mode 100644 modelcache/cache.py delete mode 100644 modelcache/config.py delete mode 100644 modelcache/core.py delete mode 100644 modelcache/embedding/mpnet_base.py delete mode 100644 modelcache/embedding/string_text.py delete mode 100644 modelcache/manager/eviction/manager.py delete mode 100644 modelcache/manager/factory.py delete mode 100644 modelcache/manager/scalar_data/manager.py delete mode 100644 modelcache/manager/vector_data/manager.py rename modelcache/similarity_evaluation/{similarity_evaluation.py => base.py} (100%) delete mode 100644 modelcache/utils/cache_func.py diff --git a/fastapi4modelcache.py b/fastapi4modelcache.py index f9d0267..d3d39bf 100644 --- a/fastapi4modelcache.py +++ b/fastapi4modelcache.py @@ -1,80 +1,19 @@ # -*- coding: utf-8 -*- -import time import uvicorn -import asyncio -import logging -import configparser import json from fastapi import FastAPI, Request, HTTPException -from pydantic import BaseModel -from concurrent.futures import ThreadPoolExecutor -from starlette.responses import PlainTextResponse -import functools - -from modelcache import cache -from modelcache.adapter import adapter -from modelcache.manager import CacheBase, VectorBase, get_data_manager -from modelcache.similarity_evaluation.distance import SearchDistanceEvaluation -from modelcache.processor.pre import query_multi_splicing -from 
modelcache.processor.pre import insert_multi_splicing -from modelcache.utils.model_filter import model_blacklist_filter -from modelcache.embedding import Data2VecAudio +from modelcache.cache import Cache #创建一个FastAPI实例 app = FastAPI() -class RequestData(BaseModel): - type: str - scope: dict = None - query: str = None - chat_info: dict = None - remove_type: str = None - id_list: list = [] - -data2vec = Data2VecAudio() -mysql_config = configparser.ConfigParser() -mysql_config.read('modelcache/config/mysql_config.ini') - -milvus_config = configparser.ConfigParser() -milvus_config.read('modelcache/config/milvus_config.ini') - -# redis_config = configparser.ConfigParser() -# redis_config.read('modelcache/config/redis_config.ini') - -# 初始化datamanager -data_manager = get_data_manager( - CacheBase("mysql", config=mysql_config), - VectorBase("milvus", dimension=data2vec.dimension, milvus_config=milvus_config) -) - -# # 使用redis初始化datamanager -# data_manager = get_data_manager( -# CacheBase("mysql", config=mysql_config), -# VectorBase("redis", dimension=data2vec.dimension, redis_config=redis_config) -# ) +cache = Cache.init("mysql", "milvus") -cache.init( - embedding_func=data2vec.to_embeddings, - data_manager=data_manager, - similarity_evaluation=SearchDistanceEvaluation(), - query_pre_embedding_func=query_multi_splicing, - insert_pre_embedding_func=insert_multi_splicing, -) - -executor = ThreadPoolExecutor(max_workers=6) - -# 异步保存查询信息 -async def save_query_info(result, model, query, delta_time_log): - loop = asyncio.get_running_loop() - func = functools.partial(cache.data_manager.save_query_resp, result, model=model, query=json.dumps(query, ensure_ascii=False), delta_time=delta_time_log) - await loop.run_in_executor(None, func) - - - -@app.get("/welcome", response_class=PlainTextResponse) +@app.get("/welcome") async def first_fastapi(): return "hello, modelcache!" 
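
For reference, a hedged client-side sketch of what the unified /modelcache endpoint consumes after this refactor. It is not part of the patch: the field names (type, scope.model, query, chat_info, remove_type, id_list) are the ones the handlers in this series parse, while the model name, texts and local URL (port 5000, from the startup lines) are placeholders.

    import requests

    BASE = "http://127.0.0.1:5000/modelcache"

    # insert: each chat_info row carries the dialogue plus the answer to cache
    insert_payload = {
        "type": "insert",
        "scope": {"model": "demo-model"},   # handlers normalize '-' and '.' to '_'
        "chat_info": [
            {"query": [{"role": "user", "content": "What is ModelCache?"}],
             "answer": "A semantic cache for LLM responses."},
        ],
    }
    print(requests.post(BASE, json=insert_payload).json())

    # query: the list of {role, content} messages is pre-processed and embedded
    query_payload = {
        "type": "query",
        "scope": {"model": "demo-model"},
        "query": [{"role": "user", "content": "What is ModelCache?"}],
    }
    print(requests.post(BASE, json=query_payload).json())   # cacheHit should be True on a warm cache

    # remove: delete specific cache entries by id
    remove_payload = {
        "type": "remove",
        "scope": {"model": "demo-model"},
        "remove_type": "delete_by_id",
        "id_list": [],   # fill with ids returned by the scalar store
    }
    print(requests.post(BASE, json=remove_payload).json())
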
+ @app.post("/modelcache") async def user_backend(request: Request): try: @@ -90,7 +29,7 @@ async def user_backend(request: Request): # 如果无法解析,返回格式错误 result = {"errorCode": 101, "errorDesc": str(e), "cacheHit": False, "delta_time": 0, "hit_query": '', "answer": ''} - asyncio.create_task(save_query_info(result, model='', query='', delta_time_log=0)) + cache.save_query_info(result, model='', query='', delta_time_log=0) raise HTTPException(status_code=101, detail="Invalid JSON format") else: request_data = raw_body @@ -102,19 +41,7 @@ async def user_backend(request: Request): except json.JSONDecodeError: raise HTTPException(status_code=101, detail="Invalid JSON format") - request_type = request_data.get('type') - model = None - if 'scope' in request_data: - model = request_data['scope'].get('model', '').replace('-', '_').replace('.', '_') - query = request_data.get('query') - chat_info = request_data.get('chat_info') - - if not request_type or request_type not in ['query', 'insert', 'remove', 'register']: - result = {"errorCode": 102, - "errorDesc": "type exception, should one of ['query', 'insert', 'remove', 'register']", - "cacheHit": False, "delta_time": 0, "hit_query": '', "answer": ''} - asyncio.create_task(save_query_info(result, model=model, query='', delta_time_log=0)) - raise HTTPException(status_code=102, detail="Type exception, should be one of ['query', 'insert', 'remove', 'register']") + return cache.handle_request(request_data) except Exception as e: request_data = raw_body if 'raw_body' in locals() else None @@ -129,65 +56,6 @@ async def user_backend(request: Request): } return result - - # model filter - filter_resp = model_blacklist_filter(model, request_type) - if isinstance(filter_resp, dict): - return filter_resp - - if request_type == 'query': - try: - start_time = time.time() - response = adapter.ChatCompletion.create_query(scope={"model": model}, query=query) - delta_time = f"{round(time.time() - start_time, 2)}s" - - if response is None: - result = {"errorCode": 0, "errorDesc": '', "cacheHit": False, "delta_time": delta_time, "hit_query": '', "answer": ''} - elif response in ['adapt_query_exception']: - result = {"errorCode": 201, "errorDesc": response, "cacheHit": False, "delta_time": delta_time, - "hit_query": '', "answer": ''} - else: - answer = response['data'] - hit_query = response['hitQuery'] - result = {"errorCode": 0, "errorDesc": '', "cacheHit": True, "delta_time": delta_time, "hit_query": hit_query, "answer": answer} - - delta_time_log = round(time.time() - start_time, 2) - asyncio.create_task(save_query_info(result, model, query, delta_time_log)) - return result - except Exception as e: - result = {"errorCode": 202, "errorDesc": str(e), "cacheHit": False, "delta_time": 0, - "hit_query": '', "answer": ''} - logging.info(f'result: {str(result)}') - return result - - if request_type == 'insert': - try: - response = adapter.ChatCompletion.create_insert(model=model, chat_info=chat_info) - if response == 'success': - return {"errorCode": 0, "errorDesc": "", "writeStatus": "success"} - else: - return {"errorCode": 301, "errorDesc": response, "writeStatus": "exception"} - except Exception as e: - return {"errorCode": 303, "errorDesc": str(e), "writeStatus": "exception"} - - if request_type == 'remove': - response = adapter.ChatCompletion.create_remove(model=model, remove_type=request_data.get("remove_type"), id_list=request_data.get("id_list")) - if not isinstance(response, dict): - return {"errorCode": 401, "errorDesc": "", "response": response, "removeStatus": 
"exception"} - - state = response.get('status') - if state == 'success': - return {"errorCode": 0, "errorDesc": "", "response": response, "writeStatus": "success"} - else: - return {"errorCode": 402, "errorDesc": "", "response": response, "writeStatus": "exception"} - - if request_type == 'register': - response = adapter.ChatCompletion.create_register(model=model) - if response in ['create_success', 'already_exists']: - return {"errorCode": 0, "errorDesc": "", "response": response, "writeStatus": "success"} - else: - return {"errorCode": 502, "errorDesc": "", "response": response, "writeStatus": "exception"} - # TODO: 可以修改为在命令行中使用`uvicorn your_module_name:app --host 0.0.0.0 --port 5000 --reload`的命令启动 if __name__ == '__main__': uvicorn.run(app, host='0.0.0.0', port=5000) \ No newline at end of file diff --git a/fastapi4modelcache_demo.py b/fastapi4modelcache_demo.py index 624fc75..84058fa 100644 --- a/fastapi4modelcache_demo.py +++ b/fastapi4modelcache_demo.py @@ -1,59 +1,16 @@ # -*- coding: utf-8 -*- -import time import uvicorn -import asyncio -import logging -# import configparser import json from fastapi import FastAPI, Request, HTTPException -from pydantic import BaseModel -from concurrent.futures import ThreadPoolExecutor -from starlette.responses import PlainTextResponse -import functools -from modelcache import cache -from modelcache.adapter import adapter -from modelcache.manager import CacheBase, VectorBase, get_data_manager -from modelcache.similarity_evaluation.distance import SearchDistanceEvaluation -from modelcache.processor.pre import query_multi_splicing -from modelcache.processor.pre import insert_multi_splicing -from modelcache.utils.model_filter import model_blacklist_filter -from modelcache.embedding import Data2VecAudio +from modelcache.cache import Cache # 创建一个FastAPI实例 app = FastAPI() -class RequestData(BaseModel): - type: str - scope: dict = None - query: str = None - chat_info: list = None - remove_type: str = None - id_list: list = [] +cache = Cache.init("sqlite", "faiss") -data2vec = Data2VecAudio() - -data_manager = get_data_manager(CacheBase("sqlite"), VectorBase("faiss", dimension=data2vec.dimension)) - -cache.init( - embedding_func=data2vec.to_embeddings, - data_manager=data_manager, - similarity_evaluation=SearchDistanceEvaluation(), - query_pre_embedding_func=query_multi_splicing, - insert_pre_embedding_func=insert_multi_splicing, -) - -executor = ThreadPoolExecutor(max_workers=6) - -# 异步保存查询信息 -async def save_query_info_fastapi(result, model, query, delta_time_log): - loop = asyncio.get_running_loop() - func = functools.partial(cache.data_manager.save_query_resp, result, model=model, query=json.dumps(query, ensure_ascii=False), delta_time=delta_time_log) - await loop.run_in_executor(None, func) - - - -@app.get("/welcome", response_class=PlainTextResponse) +@app.get("/welcome") async def first_fastapi(): return "hello, modelcache!" 
@@ -68,9 +25,12 @@ async def user_backend(request: Request): try: # 尝试将字符串解析为JSON对象 request_data = json.loads(raw_body) - except json.JSONDecodeError: + except json.JSONDecodeError as e: # 如果无法解析,返回格式错误 - raise HTTPException(status_code=400, detail="Invalid JSON format") + result = {"errorCode": 101, "errorDesc": str(e), "cacheHit": False, "delta_time": 0, "hit_query": '', + "answer": ''} + cache.save_query_info(result, model='', query='', delta_time_log=0) + raise HTTPException(status_code=101, detail="Invalid JSON format") else: request_data = raw_body @@ -79,17 +39,9 @@ async def user_backend(request: Request): try: request_data = json.loads(request_data) except json.JSONDecodeError: - raise HTTPException(status_code=400, detail="Invalid JSON format") - - request_type = request_data.get('type') - model = None - if 'scope' in request_data: - model = request_data['scope'].get('model', '').replace('-', '_').replace('.', '_') - query = request_data.get('query') - chat_info = request_data.get('chat_info') + raise HTTPException(status_code=101, detail="Invalid JSON format") - if not request_type or request_type not in ['query', 'insert', 'remove', 'detox']: - raise HTTPException(status_code=400, detail="Type exception, should be one of ['query', 'insert', 'remove', 'detox']") + return cache.handle_request(request_data) except Exception as e: request_data = raw_body if 'raw_body' in locals() else None @@ -104,59 +56,6 @@ async def user_backend(request: Request): } return result - - # model filter - filter_resp = model_blacklist_filter(model, request_type) - if isinstance(filter_resp, dict): - return filter_resp - - if request_type == 'query': - try: - start_time = time.time() - response = adapter.ChatCompletion.create_query(scope={"model": model}, query=query) - delta_time = f"{round(time.time() - start_time, 2)}s" - - if response is None: - result = {"errorCode": 0, "errorDesc": '', "cacheHit": False, "delta_time": delta_time, "hit_query": '', "answer": ''} - elif response in ['adapt_query_exception']: - # elif isinstance(response, str): - result = {"errorCode": 201, "errorDesc": response, "cacheHit": False, "delta_time": delta_time, - "hit_query": '', "answer": ''} - else: - answer = response['data'] - hit_query = response['hitQuery'] - result = {"errorCode": 0, "errorDesc": '', "cacheHit": True, "delta_time": delta_time, "hit_query": hit_query, "answer": answer} - - delta_time_log = round(time.time() - start_time, 2) - asyncio.create_task(save_query_info_fastapi(result, model, query, delta_time_log)) - return result - except Exception as e: - result = {"errorCode": 202, "errorDesc": str(e), "cacheHit": False, "delta_time": 0, - "hit_query": '', "answer": ''} - logging.info(f'result: {str(result)}') - return result - - if request_type == 'insert': - try: - response = adapter.ChatCompletion.create_insert(model=model, chat_info=chat_info) - if response == 'success': - return {"errorCode": 0, "errorDesc": "", "writeStatus": "success"} - else: - return {"errorCode": 301, "errorDesc": response, "writeStatus": "exception"} - except Exception as e: - return {"errorCode": 303, "errorDesc": str(e), "writeStatus": "exception"} - - if request_type == 'remove': - response = adapter.ChatCompletion.create_remove(model=model, remove_type=request_data.get("remove_type"), id_list=request_data.get("id_list")) - if not isinstance(response, dict): - return {"errorCode": 401, "errorDesc": "", "response": response, "removeStatus": "exception"} - - state = response.get('status') - if state == 'success': - return 
{"errorCode": 0, "errorDesc": "", "response": response, "writeStatus": "success"} - else: - return {"errorCode": 402, "errorDesc": "", "response": response, "writeStatus": "exception"} - # TODO: 可以修改为在命令行中使用`uvicorn your_module_name:app --host 0.0.0.0 --port 5000 --reload`的命令启动 if __name__ == '__main__': uvicorn.run(app, host='0.0.0.0', port=5000) \ No newline at end of file diff --git a/flask4modelcache.py b/flask4modelcache.py index d793953..0ca949d 100644 --- a/flask4modelcache.py +++ b/flask4modelcache.py @@ -1,112 +1,12 @@ # -*- coding: utf-8 -*- -import time from flask import Flask, request -import logging -import configparser import json -from modelcache import cache -from modelcache.adapter import adapter -from modelcache.embedding.mpnet_base import MPNet_Base -from modelcache.manager.vector_data import manager -from modelcache.manager import CacheBase, VectorBase, get_data_manager, data_manager -from modelcache.similarity_evaluation.distance import SearchDistanceEvaluation -from modelcache.processor.pre import query_multi_splicing,insert_multi_splicing, query_with_role -from concurrent.futures import ThreadPoolExecutor -from modelcache.utils.model_filter import model_blacklist_filter -from modelcache.embedding import Data2VecAudio +from modelcache.cache import Cache # 创建一个Flask实例 app = Flask(__name__) - -def response_text(cache_resp): - return cache_resp['data'] - - -def save_query_info(result, model, query, delta_time_log): - cache.data_manager.save_query_resp(result, model=model, query=json.dumps(query, ensure_ascii=False), - delta_time=delta_time_log) - - -def response_hitquery(cache_resp): - return cache_resp['hitQuery'] - -manager.MPNet_base = True - -if manager.MPNet_base: - mpnet_base = MPNet_Base() - embedding_func = mpnet_base.to_embeddings - dimension = mpnet_base.dimension - data_manager.NORMALIZE = False - query_pre_embedding_func=query_with_role - insert_pre_embedding_func=query_with_role -else: - data2vec = Data2VecAudio() - embedding_func = data2vec.to_embeddings - dimension = data2vec.dimension - query_pre_embedding_func=query_multi_splicing - insert_pre_embedding_func=insert_multi_splicing - -mysql_config = configparser.ConfigParser() -mysql_config.read('modelcache/config/mysql_config.ini') - -milvus_config = configparser.ConfigParser() -milvus_config.read('modelcache/config/milvus_config.ini') - -# es_config = configparser.ConfigParser() -# es_config.read('modelcache/config/elasticsearch_config.ini') - -# redis_config = configparser.ConfigParser() -# redis_config.read('modelcache/config/redis_config.ini') - -# chromadb_config = configparser.ConfigParser() -# chromadb_config.read('modelcache/config/chromadb_config.ini') - -data_manager = get_data_manager( - CacheBase("mysql", config=mysql_config), - VectorBase("milvus", - dimension=dimension, - milvus_config=milvus_config, - index_params={ - "metric_type": "COSINE", - "index_type": "HNSW", - "params": {"M": 16, "efConstruction": 64}, - } if manager.MPNet_base else None, - search_params={ - "IVF_FLAT": {"metric_type": "COSINE", "params": {"nprobe": 10}}, - "IVF_SQ8": {"metric_type": "COSINE", "params": {"nprobe": 10}}, - "IVF_PQ": {"metric_type": "COSINE", "params": {"nprobe": 10}}, - "HNSW": {"metric_type": "COSINE", "params": {"ef": 10}}, - "RHNSW_FLAT": {"metric_type": "COSINE", "params": {"ef": 10}}, - "RHNSW_SQ": {"metric_type": "COSINE", "params": {"ef": 10}}, - "RHNSW_PQ": {"metric_type": "COSINE", "params": {"ef": 10}}, - "IVF_HNSW": {"metric_type": "COSINE", "params": {"nprobe": 10, "ef": 10}}, - "ANNOY": 
{"metric_type": "COSINE", "params": {"search_k": 10}}, - "AUTOINDEX": {"metric_type": "COSINE", "params": {}}, - } if manager.MPNet_base else None - ), - eviction='WTINYLFU', - max_size=100000 -) - - -# data_manager = get_data_manager(CacheBase("mysql", config=mysql_config), -# VectorBase("chromadb", dimension=data2vec.dimension, chromadb_config=chromadb_config)) - -# data_manager = get_data_manager(CacheBase("mysql", config=mysql_config), -# VectorBase("redis", dimension=data2vec.dimension, redis_config=redis_config)) - -cache.init( - embedding_func=embedding_func, - data_manager=data_manager, - similarity_evaluation=SearchDistanceEvaluation(), - query_pre_embedding_func=query_pre_embedding_func, - insert_pre_embedding_func=insert_pre_embedding_func, -) - -global executor -executor = ThreadPoolExecutor(max_workers=6) - +cache = Cache.init("mysql","milvus") @app.route('/welcome') def first_flask(): # 视图函数 @@ -115,123 +15,18 @@ def first_flask(): # 视图函数 @app.route('/modelcache', methods=['GET', 'POST']) def user_backend(): - param_dict = [] + param_dict = {} try: if request.method == 'POST': param_dict = request.json elif request.method == 'GET': param_dict = request.args - except Exception as e: - result = {"errorCode": 101, "errorDesc": str(e), "cacheHit": False, "delta_time": 0, "hit_query": '', - "answer": ''} - cache.data_manager.save_query_resp(result, model='', query='', delta_time=0) - return json.dumps(result) - - # param parsing - try: - request_type = param_dict.get("type") - - scope = param_dict.get("scope") - if scope is not None: - model = scope.get('model') - model = model.replace('-', '_') - model = model.replace('.', '_') - query = param_dict.get("query") - chat_info = param_dict.get("chat_info") - if request_type is None or request_type not in ['query', 'insert', 'remove', 'register']: - result = {"errorCode": 102, - "errorDesc": "type exception, should one of ['query', 'insert', 'remove', 'register']", - "cacheHit": False, "delta_time": 0, "hit_query": '', "answer": ''} - cache.data_manager.save_query_resp(result, model=model, query='', delta_time=0) - return json.dumps(result) + return json.dumps(cache.handle_request(param_dict)) except Exception as e: - result = {"errorCode": 103, "errorDesc": str(e), "cacheHit": False, "delta_time": 0, "hit_query": '', + result = {"errorCode": 101, "errorDesc": str(e), "cacheHit": False, "delta_time": 0, "hit_query": '', "answer": ''} - return json.dumps(result) - - # model filter - filter_resp = model_blacklist_filter(model, request_type) - if isinstance(filter_resp, dict): - return json.dumps(filter_resp) - - if request_type == 'query': - try: - start_time = time.time() - response = adapter.ChatCompletion.create_query( - scope={"model": model}, - query=query - ) - delta_time = '{}s'.format(round(time.time() - start_time, 2)) - if response is None: - result = {"errorCode": 0, "errorDesc": '', "cacheHit": False, "delta_time": delta_time, "hit_query": '', - "answer": ''} - # elif response in ['adapt_query_exception']: - elif isinstance(response, str): - result = {"errorCode": 201, "errorDesc": response, "cacheHit": False, "delta_time": delta_time, - "hit_query": '', "answer": ''} - else: - answer = response_text(response) - hit_query = response_hitquery(response) - result = {"errorCode": 0, "errorDesc": '', "cacheHit": True, "delta_time": delta_time, - "hit_query": hit_query, "answer": answer} - delta_time_log = round(time.time() - start_time, 2) - future = executor.submit(save_query_info, result, model, query, delta_time_log) - except 
Exception as e: - result = {"errorCode": 202, "errorDesc": str(e), "cacheHit": False, "delta_time": 0, - "hit_query": '', "answer": ''} - logging.info('result: {}'.format(result)) - - return json.dumps(result, ensure_ascii=False) - - if request_type == 'insert': - try: - try: - response = adapter.ChatCompletion.create_insert( - model=model, - chat_info=chat_info - ) - except Exception as e: - result = {"errorCode": 302, "errorDesc": str(e), "writeStatus": "exception"} - return json.dumps(result, ensure_ascii=False) - - if response == 'success': - result = {"errorCode": 0, "errorDesc": "", "writeStatus": "success"} - else: - result = {"errorCode": 301, "errorDesc": response, "writeStatus": "exception"} - return json.dumps(result, ensure_ascii=False) - except Exception as e: - result = {"errorCode": 303, "errorDesc": str(e), "writeStatus": "exception"} - return json.dumps(result, ensure_ascii=False) - - if request_type == 'remove': - remove_type = param_dict.get("remove_type") - id_list = param_dict.get("id_list", []) - - response = adapter.ChatCompletion.create_remove( - model=model, - remove_type=remove_type, - id_list=id_list - ) - if not isinstance(response, dict): - result = {"errorCode": 401, "errorDesc": "", "response": response, "removeStatus": "exception"} - return json.dumps(result) - - state = response.get('status') - if state == 'success': - result = {"errorCode": 0, "errorDesc": "", "response": response, "writeStatus": "success"} - else: - result = {"errorCode": 402, "errorDesc": "", "response": response, "writeStatus": "exception"} - return json.dumps(result) - - if request_type == 'register': - response = adapter.ChatCompletion.create_register( - model=model - ) - if response in ['create_success', 'already_exists']: - result = {"errorCode": 0, "errorDesc": "", "response": response, "writeStatus": "success"} - else: - result = {"errorCode": 502, "errorDesc": "", "response": response, "writeStatus": "exception"} + cache.save_query_resp(result, model='', query='', delta_time=0) return json.dumps(result) diff --git a/flask4modelcache_demo.py b/flask4modelcache_demo.py index 54b9e8e..e0d5cfb 100644 --- a/flask4modelcache_demo.py +++ b/flask4modelcache_demo.py @@ -1,51 +1,12 @@ # -*- coding: utf-8 -*- -import time from flask import Flask, request -import logging import json -from modelcache import cache -from modelcache.adapter import adapter -from modelcache.manager import CacheBase, VectorBase, get_data_manager -from modelcache.similarity_evaluation.distance import SearchDistanceEvaluation -from modelcache.processor.pre import query_multi_splicing -from modelcache.processor.pre import insert_multi_splicing -from concurrent.futures import ThreadPoolExecutor -from modelcache.utils.model_filter import model_blacklist_filter -from modelcache.embedding import Data2VecAudio +from modelcache.cache import Cache # 创建一个Flask实例 app = Flask(__name__) - -def response_text(cache_resp): - return cache_resp['data'] - - -def save_query_info(result, model, query, delta_time_log): - cache.data_manager.save_query_resp(result, model=model, query=json.dumps(query, ensure_ascii=False), - delta_time=delta_time_log) - - -def response_hitquery(cache_resp): - return cache_resp['hitQuery'] - - -data2vec = Data2VecAudio() -data_manager = get_data_manager(CacheBase("sqlite"), VectorBase("faiss", dimension=data2vec.dimension)) - - -cache.init( - embedding_func=data2vec.to_embeddings, - data_manager=data_manager, - similarity_evaluation=SearchDistanceEvaluation(), - query_pre_embedding_func=query_multi_splicing, 
- insert_pre_embedding_func=insert_multi_splicing, -) - -# cache.set_openai_key() -global executor -executor = ThreadPoolExecutor(max_workers=6) - +cache = Cache.init("sqlite","faiss") @app.route('/welcome') def first_flask(): # 视图函数 @@ -54,116 +15,20 @@ def first_flask(): # 视图函数 @app.route('/modelcache', methods=['GET', 'POST']) def user_backend(): + param_dict = {} try: if request.method == 'POST': - request_data = request.json + param_dict = request.json elif request.method == 'GET': - request_data = request.args - param_dict = json.loads(request_data) - except Exception as e: - result = {"errorCode": 101, "errorDesc": str(e), "cacheHit": False, "delta_time": 0, "hit_query": '', - "answer": ''} - cache.data_manager.save_query_resp(result, model='', query='', delta_time=0) - return json.dumps(result) + param_dict = request.args - # param parsing - try: - request_type = param_dict.get("type") - scope = param_dict.get("scope") - if scope is not None: - model = scope.get('model') - model = model.replace('-', '_') - model = model.replace('.', '_') - query = param_dict.get("query") - chat_info = param_dict.get("chat_info") - if request_type is None or request_type not in ['query', 'insert', 'detox', 'remove']: - result = {"errorCode": 102, - "errorDesc": "type exception, should one of ['query', 'insert', 'detox', 'remove']", - "cacheHit": False, "delta_time": 0, "hit_query": '', "answer": ''} - cache.data_manager.save_query_resp(result, model=model, query='', delta_time=0) - return json.dumps(result) + return json.dumps(cache.handle_request(param_dict)) except Exception as e: - result = {"errorCode": 103, "errorDesc": str(e), "cacheHit": False, "delta_time": 0, "hit_query": '', + result = {"errorCode": 101, "errorDesc": str(e), "cacheHit": False, "delta_time": 0, "hit_query": '', "answer": ''} - return json.dumps(result) - - # model filter - filter_resp = model_blacklist_filter(model, request_type) - if isinstance(filter_resp, dict): - return json.dumps(filter_resp) - - if request_type == 'query': - try: - start_time = time.time() - response = adapter.ChatCompletion.create_query( - scope={"model": model}, - query=query - ) - delta_time = '{}s'.format(round(time.time() - start_time, 2)) - if response is None: - result = {"errorCode": 0, "errorDesc": '', "cacheHit": False, "delta_time": delta_time, "hit_query": '', - "answer": ''} - elif response in ['adapt_query_exception']: - result = {"errorCode": 201, "errorDesc": response, "cacheHit": False, "delta_time": delta_time, - "hit_query": '', "answer": ''} - else: - answer = response_text(response) - hit_query = response_hitquery(response) - result = {"errorCode": 0, "errorDesc": '', "cacheHit": True, "delta_time": delta_time, - "hit_query": hit_query, "answer": answer} - delta_time_log = round(time.time() - start_time, 2) - future = executor.submit(save_query_info, result, model, query, delta_time_log) - except Exception as e: - result = {"errorCode": 202, "errorDesc": e, "cacheHit": False, "delta_time": 0, - "hit_query": '', "answer": ''} - logging.info('result: {}'.format(result)) - return json.dumps(result, ensure_ascii=False) - - if request_type == 'insert': - try: - try: - response = adapter.ChatCompletion.create_insert( - model=model, - chat_info=chat_info - ) - except Exception as e: - result = {"errorCode": 303, "errorDesc": e, "writeStatus": "exception"} - return json.dumps(result, ensure_ascii=False) - - if response in ['adapt_insert_exception']: - result = {"errorCode": 301, "errorDesc": response, "writeStatus": "exception"} - elif 
response == 'success': - result = {"errorCode": 0, "errorDesc": "", "writeStatus": "success"} - else: - result = {"errorCode": 302, "errorDesc": response, - "writeStatus": "exception"} - return json.dumps(result, ensure_ascii=False) - except Exception as e: - result = {"errorCode": 304, "errorDesc": e, "writeStatus": "exception"} - return json.dumps(result, ensure_ascii=False) - - if request_type == 'remove': - remove_type = param_dict.get("remove_type") - id_list = param_dict.get("id_list", []) - - response = adapter.ChatCompletion.create_remove( - model=model, - remove_type=remove_type, - id_list=id_list - ) - - if not isinstance(response, dict): - result = {"errorCode": 401, "errorDesc": "", "response": response, "removeStatus": "exception"} - return json.dumps(result) - - state = response.get('status') - if state == 'success': - result = {"errorCode": 0, "errorDesc": "", "response": response, "writeStatus": "success"} - else: - result = {"errorCode": 402, "errorDesc": "", "response": response, "writeStatus": "exception"} + cache.save_query_resp(result, model='', query='', delta_time=0) return json.dumps(result) if __name__ == '__main__': - # app.run(host='0.0.0.0', port=5000, debug=True) app.run(host='0.0.0.0', port=5000) diff --git a/modelcache/__init__.py b/modelcache/__init__.py index 9a82563..8bcb18a 100644 --- a/modelcache/__init__.py +++ b/modelcache/__init__.py @@ -1,5 +1,7 @@ # -*- coding: utf-8 -*- -from modelcache.core import Cache -from modelcache.core import cache -from modelcache.config import Config -import modelcache.adapter + + + + + + diff --git a/modelcache/adapter/adapter.py b/modelcache/adapter/adapter.py index ddad9af..204841b 100644 --- a/modelcache/adapter/adapter.py +++ b/modelcache/adapter/adapter.py @@ -20,6 +20,7 @@ def cache_data_convert(cache_data, cache_query): **kwargs ) except Exception as e: + print(e) return str(e) @classmethod @@ -41,7 +42,7 @@ def create_remove(cls, *args, **kwargs): **kwargs ) except Exception as e: - logging.info('adapt_remove_e: {}'.format(e)) + print(e) return str(e) @classmethod @@ -52,6 +53,7 @@ def create_register(cls, *args, **kwargs): **kwargs ) except Exception as e: + print(e) return str(e) diff --git a/modelcache/adapter/adapter_insert.py b/modelcache/adapter/adapter_insert.py index c2e2a29..a507ee1 100644 --- a/modelcache/adapter/adapter_insert.py +++ b/modelcache/adapter/adapter_insert.py @@ -1,18 +1,14 @@ # -*- coding: utf-8 -*- -from modelcache import cache from modelcache.utils.error import NotInitError from modelcache.utils.time import time_cal def adapt_insert(*args, **kwargs): - chat_cache = kwargs.pop("cache_obj", cache) + chat_cache = kwargs.pop("cache_obj") model = kwargs.pop("model", None) require_object_store = kwargs.pop("require_object_store", False) if require_object_store: assert chat_cache.data_manager.o, "Object store is required for adapter." 
- if not chat_cache.has_init: - raise NotInitError() - cache_enable = chat_cache.cache_enable_func(*args, **kwargs) context = kwargs.pop("cache_context", {}) chat_info = kwargs.pop("chat_info", []) @@ -24,7 +20,7 @@ def adapt_insert(*args, **kwargs): pre_embedding_data = chat_cache.insert_pre_embedding_func( row, extra_param=context.get("pre_embedding_func", None), - prompts=chat_cache.config.prompts, + prompts=chat_cache.prompts, ) pre_embedding_data_list.append(pre_embedding_data) llm_data_list.append(row['answer']) @@ -32,6 +28,7 @@ def adapt_insert(*args, **kwargs): chat_cache.embedding_func, func_name="embedding", report_func=chat_cache.report.embedding, + cache_obj=chat_cache )(pre_embedding_data) embedding_data_list.append(embedding_data) diff --git a/modelcache/adapter/adapter_query.py b/modelcache/adapter/adapter_query.py index aa42eae..2e56cc2 100644 --- a/modelcache/adapter/adapter_query.py +++ b/modelcache/adapter/adapter_query.py @@ -1,103 +1,150 @@ # -*- coding: utf-8 -*- import logging -import time - -from modelcache import cache -from modelcache.utils.error import NotInitError +from modelcache.embedding import MetricType from modelcache.utils.time import time_cal -from modelcache.processor.pre import multi_analysis from FlagEmbedding import FlagReranker -from modelcache.manager.vector_data import manager USE_RERANKER = False # 如果为 True 则启用 reranker,否则使用原有逻辑 def adapt_query(cache_data_convert, *args, **kwargs): - chat_cache = kwargs.pop("cache_obj", cache) + chat_cache = kwargs.pop("cache_obj") scope = kwargs.pop("scope") model = scope['model'] - if not chat_cache.has_init: - raise NotInitError() - cache_enable = chat_cache.cache_enable_func(*args, **kwargs) context = kwargs.pop("cache_context", {}) - embedding_data = None cache_factor = kwargs.pop("cache_factor", 1.0) pre_embedding_data = chat_cache.query_pre_embedding_func( kwargs, extra_param=context.get("pre_embedding_func", None), - prompts=chat_cache.config.prompts, + prompts=chat_cache.prompts, ) - if cache_enable: - embedding_data = time_cal( - chat_cache.embedding_func, - func_name="embedding", - report_func=chat_cache.report.embedding, - )(pre_embedding_data) - if cache_enable: - cache_data_list = time_cal( - chat_cache.data_manager.search, - func_name="vector_search", - report_func=chat_cache.report.search, - )( - embedding_data, - extra_param=context.get("search_func", None), - top_k=kwargs.pop("top_k", -1), - model=model - ) - cache_answers = [] - cache_questions = [] - cache_ids = [] + embedding_data = time_cal( + chat_cache.embedding_func, + func_name="embedding", + report_func=chat_cache.report.embedding, + cache_obj=chat_cache + )(pre_embedding_data) + + cache_data_list = time_cal( + chat_cache.data_manager.search, + func_name="vector_search", + report_func=chat_cache.report.search, + cache_obj=chat_cache + )( + embedding_data, + extra_param=context.get("search_func", None), + top_k=kwargs.pop("top_k", -1), + model=model + ) + cache_answers = [] + cache_questions = [] + cache_ids = [] + cosine_similarity = None + + if chat_cache.similarity_metric_type == MetricType.COSINE: cosine_similarity = cache_data_list[0][0] + # This code uses the built-in cosine similarity evaluation in milvus + if cosine_similarity < 0.9: + return None + elif chat_cache.similarity_metric_type == MetricType.L2: + ## this is the code that uses L2 for similarity evaluation + similarity_threshold = chat_cache.similarity_threshold + similarity_threshold_long = chat_cache.similarity_threshold_long - if manager.MPNet_base: - # This code 
uses the built-in cosine similarity evaluation in milvus - if cosine_similarity < 0.9: - return None + min_rank, max_rank = chat_cache.similarity_evaluation.range() + rank_threshold = (max_rank - min_rank) * similarity_threshold * cache_factor + rank_threshold_long = (max_rank - min_rank) * similarity_threshold_long * cache_factor + rank_threshold = ( + max_rank + if rank_threshold > max_rank + else min_rank + if rank_threshold < min_rank + else rank_threshold + ) + rank_threshold_long = ( + max_rank + if rank_threshold_long > max_rank + else min_rank + if rank_threshold_long < min_rank + else rank_threshold_long + ) + if cache_data_list is None or len(cache_data_list) == 0: + rank_pre = -1.0 else: - ## this is the code that uses L2 for similarity evaluation - similarity_threshold = chat_cache.config.similarity_threshold - similarity_threshold_long = chat_cache.config.similarity_threshold_long - - min_rank, max_rank = chat_cache.similarity_evaluation.range() - rank_threshold = (max_rank - min_rank) * similarity_threshold * cache_factor - rank_threshold_long = (max_rank - min_rank) * similarity_threshold_long * cache_factor - rank_threshold = ( - max_rank - if rank_threshold > max_rank - else min_rank - if rank_threshold < min_rank - else rank_threshold + cache_data_dict = {'search_result': cache_data_list[0]} + rank_pre = chat_cache.similarity_evaluation.evaluation( + None, + cache_data_dict, + extra_param=context.get("evaluation_func", None), ) - rank_threshold_long = ( - max_rank - if rank_threshold_long > max_rank - else min_rank - if rank_threshold_long < min_rank - else rank_threshold_long + if rank_pre < rank_threshold: + return None + else: + raise ValueError( + f"Unsupported similarity metric type: {chat_cache.similarity_metric_type}" + ) + + if USE_RERANKER: + reranker = FlagReranker('BAAI/bge-reranker-v2-m3', use_fp16=False) + for cache_data in cache_data_list: + primary_id = cache_data[1] + ret = chat_cache.data_manager.get_scalar_data( + cache_data, extra_param=context.get("get_scalar_data", None),model=model ) - if cache_data_list is None or len(cache_data_list) == 0: - rank_pre = -1.0 + if ret is None: + continue + + rank = reranker.compute_score([pre_embedding_data, ret[0]], normalize=True)[0] + + if "deps" in context and hasattr(ret.question, "deps"): + eval_query_data = { + "question": context["deps"][0]["data"], + "embedding": None + } + eval_cache_data = { + "question": ret.question.deps[0].data, + "answer": ret.answers[0].answer, + "search_result": cache_data, + "embedding": None, + } else: - cache_data_dict = {'search_result': cache_data_list[0]} - rank_pre = chat_cache.similarity_evaluation.evaluation( - None, - cache_data_dict, - extra_param=context.get("evaluation_func", None), - ) - if rank_pre < rank_threshold: - return None + eval_query_data = { + "question": pre_embedding_data, + "embedding": embedding_data, + } - if USE_RERANKER and not manager.MPNet_base: - reranker = FlagReranker('BAAI/bge-reranker-v2-m3', use_fp16=False) - for cache_data in cache_data_list: - primary_id = cache_data[1] - ret = chat_cache.data_manager.get_scalar_data( - cache_data, extra_param=context.get("get_scalar_data", None),model=model - ) - if ret is None: - continue + eval_cache_data = { + "question": ret[0], + "answer": ret[1], + "search_result": cache_data, + "embedding": None + } - rank = reranker.compute_score([pre_embedding_data, ret[0]], normalize=True)[0] + if len(pre_embedding_data) <= 256: + if rank_threshold <= rank: + cache_answers.append((rank, ret[0])) + 
cache_questions.append((rank, ret[1])) + cache_ids.append((rank, primary_id)) + else: + if rank_threshold_long <= rank: + cache_answers.append((rank, ret[0])) + cache_questions.append((rank, ret[1])) + cache_ids.append((rank, primary_id)) + else: + # 不使用 reranker 时,走原来的逻辑 + for cache_data in cache_data_list: + primary_id = cache_data[1] + ret = chat_cache.data_manager.get_scalar_data( + cache_data, extra_param=context.get("get_scalar_data", None),model=model + ) + if ret is None: + continue + if chat_cache.similarity_metric_type == MetricType.COSINE: + assert cosine_similarity is not None, "cosine_similarity should not be None" + cache_answers.append((cosine_similarity, ret[0])) + cache_questions.append((cosine_similarity, ret[1])) + cache_ids.append((cosine_similarity, primary_id)) + elif chat_cache.similarity_metric_type == MetricType.L2: if "deps" in context and hasattr(ret.question, "deps"): eval_query_data = { "question": context["deps"][0]["data"], @@ -121,6 +168,11 @@ def adapt_query(cache_data_convert, *args, **kwargs): "search_result": cache_data, "embedding": None } + rank = chat_cache.similarity_evaluation.evaluation( + eval_query_data, + eval_cache_data, + extra_param=context.get("evaluation_func", None), + ) if len(pre_embedding_data) <= 256: if rank_threshold <= rank: @@ -132,81 +184,30 @@ def adapt_query(cache_data_convert, *args, **kwargs): cache_answers.append((rank, ret[0])) cache_questions.append((rank, ret[1])) cache_ids.append((rank, primary_id)) - else: - # 不使用 reranker 时,走原来的逻辑 - for cache_data in cache_data_list: - primary_id = cache_data[1] - ret = chat_cache.data_manager.get_scalar_data( - cache_data, extra_param=context.get("get_scalar_data", None),model=model + else: + raise ValueError( + f"Unsupported similarity metric type: {chat_cache.similarity_metric_type}" ) - if ret is None: - continue - - if manager.MPNet_base: - cache_answers.append((cosine_similarity, ret[0])) - cache_questions.append((cosine_similarity, ret[1])) - cache_ids.append((cosine_similarity, primary_id)) - else: - if "deps" in context and hasattr(ret.question, "deps"): - eval_query_data = { - "question": context["deps"][0]["data"], - "embedding": None - } - eval_cache_data = { - "question": ret.question.deps[0].data, - "answer": ret.answers[0].answer, - "search_result": cache_data, - "embedding": None, - } - else: - eval_query_data = { - "question": pre_embedding_data, - "embedding": embedding_data, - } - - eval_cache_data = { - "question": ret[0], - "answer": ret[1], - "search_result": cache_data, - "embedding": None - } - rank = chat_cache.similarity_evaluation.evaluation( - eval_query_data, - eval_cache_data, - extra_param=context.get("evaluation_func", None), - ) - if len(pre_embedding_data) <= 256: - if rank_threshold <= rank: - cache_answers.append((rank, ret[0])) - cache_questions.append((rank, ret[1])) - cache_ids.append((rank, primary_id)) - else: - if rank_threshold_long <= rank: - cache_answers.append((rank, ret[0])) - cache_questions.append((rank, ret[1])) - cache_ids.append((rank, primary_id)) - - cache_answers = sorted(cache_answers, key=lambda x: x[0], reverse=True) - cache_questions = sorted(cache_questions, key=lambda x: x[0], reverse=True) - cache_ids = sorted(cache_ids, key=lambda x: x[0], reverse=True) - if len(cache_answers) != 0: - return_message = chat_cache.post_process_messages_func( - [t[1] for t in cache_answers] - ) - return_query = chat_cache.post_process_messages_func( - [t[1] for t in cache_questions] - ) - return_id = chat_cache.post_process_messages_func( - 
[t[1] for t in cache_ids] - ) - # 更新命中次数 - try: - chat_cache.data_manager.update_hit_count(return_id) - except Exception: - logging.info('update_hit_count except, please check!') + cache_answers = sorted(cache_answers, key=lambda x: x[0], reverse=True) + cache_questions = sorted(cache_questions, key=lambda x: x[0], reverse=True) + cache_ids = sorted(cache_ids, key=lambda x: x[0], reverse=True) + if len(cache_answers) != 0: + return_message = chat_cache.post_process_messages_func( + [t[1] for t in cache_answers] + ) + return_query = chat_cache.post_process_messages_func( + [t[1] for t in cache_questions] + ) + return_id = chat_cache.post_process_messages_func( + [t[1] for t in cache_ids] + ) + # 更新命中次数 + try: + chat_cache.data_manager.update_hit_count(return_id) + except Exception: + logging.info('update_hit_count except, please check!') - chat_cache.report.hint_cache() - return cache_data_convert(return_message, return_query) - return None + chat_cache.report.hint_cache() + return cache_data_convert(return_message, return_query) return None \ No newline at end of file diff --git a/modelcache/adapter/adapter_register.py b/modelcache/adapter/adapter_register.py index dafe597..811bcec 100644 --- a/modelcache/adapter/adapter_register.py +++ b/modelcache/adapter/adapter_register.py @@ -1,9 +1,8 @@ # -*- coding: utf-8 -*- -from modelcache import cache def adapt_register(*args, **kwargs): - chat_cache = kwargs.pop("cache_obj", cache) + chat_cache = kwargs.pop("cache_obj") model = kwargs.pop("model", None) if model is None or len(model) == 0: return ValueError('') diff --git a/modelcache/adapter/adapter_remove.py b/modelcache/adapter/adapter_remove.py index 25f1ba3..ca00fc1 100644 --- a/modelcache/adapter/adapter_remove.py +++ b/modelcache/adapter/adapter_remove.py @@ -1,17 +1,14 @@ # -*- coding: utf-8 -*- -from modelcache import cache from modelcache.utils.error import NotInitError, RemoveError def adapt_remove(*args, **kwargs): - chat_cache = kwargs.pop("cache_obj", cache) + chat_cache = kwargs.pop("cache_obj") model = kwargs.pop("model", None) remove_type = kwargs.pop("remove_type", None) require_object_store = kwargs.pop("require_object_store", False) if require_object_store: assert chat_cache.data_manager.o, "Object store is required for adapter." 
- if not chat_cache.has_init: - raise NotInitError() # delete data if remove_type == 'delete_by_id': diff --git a/modelcache/cache.py b/modelcache/cache.py new file mode 100644 index 0000000..8144138 --- /dev/null +++ b/modelcache/cache.py @@ -0,0 +1,311 @@ +# -*- coding: utf-8 -*- +import atexit +import json +import logging +import time +from typing import Callable, Optional, List +from modelcache.adapter import adapter +from modelcache.utils.model_filter import model_blacklist_filter +from concurrent.futures import ThreadPoolExecutor +import configparser +from modelcache.embedding.base import BaseEmbedding, EmbeddingModel, MetricType +from modelcache.manager.scalar_data.sql_storage import SQLStorage +from modelcache.manager.vector_data.base import VectorStorage +from modelcache.processor.post import first +from modelcache.processor.pre import query_with_role, query_multi_splicing, insert_multi_splicing +from modelcache.similarity_evaluation.base import SimilarityEvaluation +from modelcache.report import Report +from modelcache.similarity_evaluation.distance import SearchDistanceEvaluation +from modelcache.utils.error import CacheError +from modelcache.utils.log import modelcache_log +from modelcache.manager.data_manager import DataManager + + #=====================================================================# + #==================== Cache class definition =========================# + #=====================================================================# + +executor = ThreadPoolExecutor(max_workers=6) + +def response_text(cache_resp): + return cache_resp['data'] + +def response_hitquery(cache_resp): + return cache_resp['hitQuery'] + +# noinspection PyMethodMayBeStatic +class Cache: + def __init__( + self, + embedding_model: EmbeddingModel, + similarity_metric_type: MetricType, + data_manager: DataManager, + query_pre_embedding_func: Callable, + insert_pre_embedding_func: Callable, + embedding_func: Callable, + report: Report, # TODO: figure out why this is needed + similarity_evaluation: Optional[SimilarityEvaluation], + post_process_messages_func: Callable, + similarity_threshold: float = 0.95, + similarity_threshold_long: float = 0.95, + prompts: Optional[List[str]] = None, + log_time_func: Callable[[str, float], None] = None, + ): + if similarity_threshold < 0 or similarity_threshold > 1: + raise CacheError( + "Invalid the similarity threshold param, reasonable range: 0-1" + ) + self.data_manager: DataManager = data_manager + self.embedding_model: EmbeddingModel = embedding_model + self.similarity_metric_type: MetricType = similarity_metric_type + self.report: Report = report + self.query_pre_embedding_func: Callable = query_pre_embedding_func + self.insert_pre_embedding_func: Callable = insert_pre_embedding_func + self.embedding_func: Callable = embedding_func + self.similarity_evaluation: Optional[SimilarityEvaluation] = similarity_evaluation + self.post_process_messages_func: Callable = post_process_messages_func + self.similarity_threshold = similarity_threshold + self.similarity_threshold_long = similarity_threshold_long + self.prompts = prompts + self.log_time_func: Callable[[str, float], None] = log_time_func + + @atexit.register + def close(): + try: + self.data_manager.close() + except Exception as e: + modelcache_log.error(e) + + def save_query_resp(self, query_resp_dict, **kwargs): + self.data_manager.save_query_resp(query_resp_dict, **kwargs) + + def save_query_info(self,result, model, query, delta_time_log): + self.data_manager.save_query_resp(result, model=model, 
query=json.dumps(query, ensure_ascii=False), + delta_time=delta_time_log) + + def handle_request(self, param_dict: dict): + # param parsing + try: + request_type = param_dict.get("type") + + scope = param_dict.get("scope") + model = None + if scope is not None: + model = scope.get('model') + model = model.replace('-', '_') + model = model.replace('.', '_') + query = param_dict.get("query") + chat_info = param_dict.get("chat_info") + if request_type is None or request_type not in ['query', 'insert', 'remove', 'register']: + result = {"errorCode": 102, + "errorDesc": "type exception, should one of ['query', 'insert', 'remove', 'register']", + "cacheHit": False, "delta_time": 0, "hit_query": '', "answer": ''} + self.data_manager.save_query_resp(result, model=model, query='', delta_time=0) + return result + except Exception as e: + return {"errorCode": 103, "errorDesc": str(e), "cacheHit": False, "delta_time": 0, "hit_query": '', + "answer": ''} + + # model filter + filter_resp = model_blacklist_filter(model, request_type) + if isinstance(filter_resp, dict): + return filter_resp + + # handle request + if request_type == 'query': + return self.handle_query(model, query) + elif request_type == 'insert': + return self.handle_insert(chat_info, model) + elif request_type == 'remove': + return self.handle_remove(model, param_dict) + elif request_type == 'register': + return self.handle_register(model) + else: + return {"errorCode": 400, "errorDesc": "bad request"} + + def handle_register(self, model): + response = adapter.ChatCompletion.create_register( + model=model, + cache_obj=self + ) + if response in ['create_success', 'already_exists']: + result = {"errorCode": 0, "errorDesc": "", "response": response, "writeStatus": "success"} + else: + result = {"errorCode": 502, "errorDesc": "", "response": response, "writeStatus": "exception"} + return result + + def handle_remove(self, model, param_dict): + remove_type = param_dict.get("remove_type") + id_list = param_dict.get("id_list", []) + response = adapter.ChatCompletion.create_remove( + model=model, + remove_type=remove_type, + id_list=id_list, + cache_obj=self + ) + if not isinstance(response, dict): + return {"errorCode": 401, "errorDesc": "", "response": response, "removeStatus": "exception"} + state = response.get('status') + if state == 'success': + result = {"errorCode": 0, "errorDesc": "", "response": response, "writeStatus": "success"} + else: + result = {"errorCode": 402, "errorDesc": "", "response": response, "writeStatus": "exception"} + return result + + def handle_insert(self, chat_info, model): + try: + try: + response = adapter.ChatCompletion.create_insert( + model=model, + chat_info=chat_info, + cache_obj=self + ) + except Exception as e: + return {"errorCode": 302, "errorDesc": str(e), "writeStatus": "exception"} + + if response == 'success': + result = {"errorCode": 0, "errorDesc": "", "writeStatus": "success"} + else: + result = {"errorCode": 301, "errorDesc": response, "writeStatus": "exception"} + return result + except Exception as e: + return {"errorCode": 303, "errorDesc": str(e), "writeStatus": "exception"} + + def handle_query(self, model, query): + try: + start_time = time.time() + response = adapter.ChatCompletion.create_query( + scope={"model": model}, + query=query, + cache_obj=self + ) + delta_time = '{}s'.format(round(time.time() - start_time, 2)) + if response is None: + result = {"errorCode": 0, "errorDesc": '', "cacheHit": False, "delta_time": delta_time, "hit_query": '', + "answer": ''} + # elif response in 
['adapt_query_exception']: + elif isinstance(response, str): + result = {"errorCode": 201, "errorDesc": response, "cacheHit": False, "delta_time": delta_time, + "hit_query": '', "answer": ''} + else: + answer = response_text(response) + hit_query = response_hitquery(response) + result = {"errorCode": 0, "errorDesc": '', "cacheHit": True, "delta_time": delta_time, + "hit_query": hit_query, "answer": answer} + delta_time_log = round(time.time() - start_time, 2) + executor.submit(self.save_query_info, result, model, query, delta_time_log) + except Exception as e: + result = {"errorCode": 202, "errorDesc": str(e), "cacheHit": False, "delta_time": 0, + "hit_query": '', "answer": ''} + logging.info('result: {}'.format(result)) + return result + + def flush(self): + self.data_manager.flush() + + @staticmethod + def init(sql_storage: str, vector_storage: str) -> 'Cache': + #================= configurations for databases ===================# + + sql_config = configparser.ConfigParser() + vector_config = configparser.ConfigParser() + + if sql_storage == "mysql": + sql_config.read('modelcache/config/mysql_config.ini') + elif sql_storage == "elasticsearch": + sql_config.read('modelcache/config/elasticsearch_config.ini') + elif sql_storage == "sqlite": + sql_config.read('modelcache/config/sqlite_config.ini') + else: + modelcache_log.error(f"Unsupported cache storage: {sql_storage}.") + raise CacheError(f"Unsupported cache storage: {sql_storage}.") + + if vector_storage == "milvus" : + vector_config.read('modelcache/config/milvus_config.ini') + elif vector_storage == "chromadb" : + vector_config.read('modelcache/config/chromadb_config.ini') + elif vector_storage == "redis" : + vector_config.read('modelcache/config/redis_config.ini') + elif vector_storage == "faiss" : + vector_config = None # faiss does not require additional configuration + else: + modelcache_log.error(f"Unsupported vector storage: {vector_storage}.") + raise CacheError(f"Unsupported vector storage: {vector_storage}.") + + + #=============== model-specific configuration =====================# + + embedding_model = EmbeddingModel.HUGGINGFACE + model_path = "sentence-transformers/all-mpnet-base-v2" + base_embedding = BaseEmbedding.get(embedding_model, model_path=model_path) + + #=== These will be used to initialize the cache ===# + query_pre_embedding_func: Callable = None + insert_pre_embedding_func: Callable = None + post_process_messages_func: Callable = None + similarity_evaluation: Optional[SimilarityEvaluation] = None + similarity_metric_type: MetricType = None + similarity_threshold: float = None + similarity_threshold_long: float = None + normalize: bool = None + #==================================================# + + # switching based on embedding_model + if embedding_model == EmbeddingModel.HUGGINGFACE: + query_pre_embedding_func = query_with_role + insert_pre_embedding_func = query_with_role + post_process_messages_func = first + similarity_evaluation = None # Uses the built-in cosine similarity evaluation in milvus + similarity_metric_type = MetricType.COSINE + similarity_threshold = 0.9 + similarity_threshold_long = 0.9 + normalize = False + + elif embedding_model == EmbeddingModel.DATA2VEC_AUDIO: + query_pre_embedding_func = query_multi_splicing + insert_pre_embedding_func = insert_multi_splicing + post_process_messages_func = first + similarity_evaluation = SearchDistanceEvaluation() + similarity_metric_type = MetricType.L2 + similarity_threshold = 0.95 + similarity_threshold_long = 0.95 + normalize = True + + # add more 
configurations for other embedding models as needed + else: + modelcache_log.error(f"Please add configuration for {embedding_model} in modelcache/__init__.py.") + raise CacheError(f"Please add configuration for {embedding_model} in modelcache/__init__.py.") + + # ====================== Data manager ==============================# + + data_manager = DataManager.get( + SQLStorage.get(sql_storage, config=sql_config), + VectorStorage.get( + name=vector_storage, + dimension=base_embedding.dimension, + config=vector_config, + metric_type=similarity_metric_type, + ), + eviction='WTINYLFU', + max_size=100000, + normalize=normalize, + ) + + + #================== Cache Initialization ====================# + + cache = Cache( + embedding_model = embedding_model, + similarity_metric_type = similarity_metric_type, + data_manager = data_manager, + report = Report(), + embedding_func = base_embedding.to_embeddings, + query_pre_embedding_func = query_pre_embedding_func, + insert_pre_embedding_func = insert_pre_embedding_func, + similarity_evaluation = similarity_evaluation, + post_process_messages_func = post_process_messages_func, + similarity_threshold = similarity_threshold, + similarity_threshold_long = similarity_threshold_long, + prompts = None, + log_time_func = None, + ) + return cache \ No newline at end of file diff --git a/modelcache/config.py b/modelcache/config.py deleted file mode 100644 index 69b3246..0000000 --- a/modelcache/config.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -from typing import Optional, Callable, List -from modelcache.utils.error import CacheError - - -class Config: - - def __init__( - self, - log_time_func: Optional[Callable[[str, float], None]] = None, - similarity_threshold: float = 0.95, - similarity_threshold_long: float = 0.95, - prompts: Optional[List[str]] = None - ): - if similarity_threshold < 0 or similarity_threshold > 1: - raise CacheError( - "Invalid the similarity threshold param, reasonable range: 0-1" - ) - self.log_time_func = log_time_func - self.similarity_threshold = similarity_threshold - self.similarity_threshold_long = similarity_threshold_long - self.prompts = prompts diff --git a/modelcache/core.py b/modelcache/core.py deleted file mode 100644 index bd57029..0000000 --- a/modelcache/core.py +++ /dev/null @@ -1,76 +0,0 @@ -# -*- coding: utf-8 -*- -import atexit -from typing import Optional, List, Any -from modelcache.processor.post import first -from modelcache.similarity_evaluation import ExactMatchEvaluation -from modelcache.similarity_evaluation import SimilarityEvaluation -from modelcache.embedding.string_text import to_embeddings as string_embedding -from modelcache.report import Report -from modelcache.config import Config -from modelcache.utils.cache_func import cache_all -from modelcache.utils.log import modelcache_log -from modelcache.manager import get_data_manager -from modelcache.manager.data_manager import DataManager - - -class Cache: - def __init__(self): - self.has_init = False - self.cache_enable_func = None - self.query_pre_embedding_func = None - self.insert_pre_embedding_func = None - self.mm_query_pre_embedding_func = None - self.mm_insert_pre_embedding_func = None - self.embedding_func = None - self.embedding_concurrent_func = None - self.data_manager: Optional[DataManager] = None - self.similarity_evaluation: Optional[SimilarityEvaluation] = None - self.post_process_messages_func = None - self.config = Config() - self.report = Report() - self.next_cache = None - - def init( - self, - 
cache_enable_func=cache_all, - query_pre_embedding_func=None, - insert_pre_embedding_func=None, - embedding_func=string_embedding, - data_manager: DataManager = get_data_manager(), - similarity_evaluation=ExactMatchEvaluation(), - post_process_messages_func=first, - config=Config(), - next_cache=None, - ): - self.has_init = True - self.cache_enable_func = cache_enable_func - self.query_pre_embedding_func = query_pre_embedding_func - self.insert_pre_embedding_func = insert_pre_embedding_func - self.embedding_func = embedding_func - self.data_manager: DataManager = data_manager - self.similarity_evaluation = similarity_evaluation - self.post_process_messages_func = post_process_messages_func - self.config = config - self.next_cache = next_cache - - @atexit.register - def close(): - try: - self.data_manager.close() - except Exception as e: - modelcache_log.error(e) - - def import_data(self, questions: List[Any], answers: List[Any]) -> None: - self.data_manager.import_data( - questions=questions, - answers=answers, - embedding_datas=[self.embedding_func(question) for question in questions], - ) - - def flush(self): - self.data_manager.flush() - if self.next_cache: - self.next_cache.data_manager.flush() - - -cache = Cache() diff --git a/modelcache/embedding/__init__.py b/modelcache/embedding/__init__.py index a7ab511..19f9b94 100644 --- a/modelcache/embedding/__init__.py +++ b/modelcache/embedding/__init__.py @@ -1,40 +1,2 @@ # -*- coding: utf-8 -*- -from modelcache.utils.lazy_import import LazyImport -huggingface = LazyImport("huggingface", globals(), "modelcache.embedding.huggingface") -data2vec = LazyImport("data2vec", globals(), "modelcache.embedding.data2vec") -llmEmb = LazyImport("llmEmb", globals(), "modelcache.embedding.llmEmb") -fasttext = LazyImport("fasttext", globals(), "modelcache.embedding.fasttext") -paddlenlp = LazyImport("paddlenlp", globals(), "modelcache.embedding.paddlenlp") -timm = LazyImport("timm", globals(), "modelcache.embedding.timm") -huggingface_tei = LazyImport("huggingface_tei", globals(), "modelcache.embedding.huggingface_tei") -bge_m3 = LazyImport("bge_m3", globals(), "modelcache.embedding.bge_m3") - - -def Huggingface(model="sentence-transformers/all-mpnet-base-v2"): - return huggingface.Huggingface(model) - - -def Data2VecAudio(model="model/text2vec-base-chinese/"): - return data2vec.Data2VecAudio(model) - - -def LlmEmb2vecAudio(): - return llmEmb.LlmEmb2Vec() - - -def FastText(model="en", dim=None): - return fasttext.FastText(model, dim) - - -def PaddleNLP(model="ernie-3.0-medium-zh"): - return paddlenlp.PaddleNLP(model) - - -def Timm(model="resnet50", device="default"): - return timm.Timm(model, device) - -def HuggingfaceTEI(base_url, model): - return huggingface_tei.HuggingfaceTEI(base_url, model) - -def BgeM3Embedding(model_path="model/bge-m3"): - return bge_m3.BgeM3Embedding(model_path) \ No newline at end of file +from modelcache.embedding.base import EmbeddingModel, MetricType, BaseEmbedding \ No newline at end of file diff --git a/modelcache/embedding/base.py b/modelcache/embedding/base.py index 4b316aa..3cd5936 100644 --- a/modelcache/embedding/base.py +++ b/modelcache/embedding/base.py @@ -1,5 +1,39 @@ # -*- coding: utf-8 -*- -from abc import ABCMeta, abstractmethod +from abc import abstractmethod, ABCMeta + +from modelcache.utils.lazy_import import LazyImport +from enum import Enum +huggingface = LazyImport("huggingface", globals(), "modelcache.embedding.huggingface") +data2vec = LazyImport("data2vec", globals(), "modelcache.embedding.data2vec") 
+llmEmb = LazyImport("llmEmb", globals(), "modelcache.embedding.llmEmb") +fasttext = LazyImport("fasttext", globals(), "modelcache.embedding.fasttext") +paddlenlp = LazyImport("paddlenlp", globals(), "modelcache.embedding.paddlenlp") +timm = LazyImport("timm", globals(), "modelcache.embedding.timm") +huggingface_tei = LazyImport("huggingface_tei", globals(), "modelcache.embedding.huggingface_tei") +bge_m3 = LazyImport("bge_m3", globals(), "modelcache.embedding.bge_m3") + +# define the embedding model enum +class EmbeddingModel(Enum): + """ + Enum for different embedding models. + """ + HUGGINGFACE = "huggingface" + DATA2VEC_AUDIO = "data2vec_audio" + LLM_EMB2VEC_AUDIO = "llmEmb2vec_audio" + FASTTEXT = "fasttext" + PADDLE_NLP = "paddlenlp" + TIMM = "timm" + HUGGINGFACE_TEI = "huggingface_tei" + BGE_M3 = "bge_m3" + + +class MetricType(Enum): + """ + Enum for different metric types used in similarity evaluation. + Different models may require different metrics for optimal performance. + """ + COSINE = "COSINE" + L2 = "L2" class BaseEmbedding(metaclass=ABCMeta): @@ -15,3 +49,52 @@ def to_embeddings(self, data, **kwargs): @abstractmethod def dimension(self) -> int: return 0 + + @staticmethod + def get(model:EmbeddingModel, **kwargs): + """ + Get the embedding model instance based on the specified model type. + :param model: The embedding model type. + :type model: EmbeddingModel + :param kwargs: Additional parameters for the model. + :return: An instance of the specified embedding model. + :rtype: BaseEmbedding + :raises ValueError: If the specified model type is not supported. + """ + if model == EmbeddingModel.HUGGINGFACE: + model_path = kwargs.pop("model_path","sentence-transformers/all-mpnet-base-v2") + return huggingface.Huggingface(model_path) + + elif model == EmbeddingModel.DATA2VEC_AUDIO: + model_path = kwargs.pop("model_path","model/text2vec-base-chinese/") + return data2vec.Data2VecAudio(model_path) + + elif model == EmbeddingModel.LLM_EMB2VEC_AUDIO: + return llmEmb.LlmEmb2Vec() + + elif model == EmbeddingModel.FASTTEXT: + model_path = kwargs.pop("model_path","en") + dim = kwargs.pop("dim", None) + return fasttext.FastText(model_path, dim) + + elif model == EmbeddingModel.PADDLE_NLP: + model_path = kwargs.pop("model_path", "ernie-3.0-medium-zh") + return paddlenlp.PaddleNLP(model_path) + + elif model == EmbeddingModel.TIMM: + model_path = kwargs.pop("model_path", "resnet50") + device = kwargs.pop("device", "default") + return timm.Timm(model_path, device) + + elif model == EmbeddingModel.HUGGINGFACE_TEI: + base_url = kwargs.pop("base_url") + model_path = kwargs.pop("model_path") + return huggingface_tei.HuggingfaceTEI(base_url, model_path) + + elif model == EmbeddingModel.BGE_M3: + model_path = kwargs.pop("model_path","model/bge-m3") + return bge_m3.BgeM3Embedding(model_path) + + else: + raise ValueError(f"Unsupported embedding model: {model}") + diff --git a/modelcache/embedding/huggingface.py b/modelcache/embedding/huggingface.py index 8c1434d..d48cb9d 100644 --- a/modelcache/embedding/huggingface.py +++ b/modelcache/embedding/huggingface.py @@ -1,34 +1,19 @@ # -*- coding: utf-8 -*- -import numpy as np - -from modelcache.utils import import_huggingface, import_torch from modelcache.embedding.base import BaseEmbedding - -import_torch() -import_huggingface() - -import torch # pylint: disable=C0413 -from transformers import AutoTokenizer, AutoModel # pylint: disable=C0413 - +from sentence_transformers import SentenceTransformer class Huggingface(BaseEmbedding): - def __init__(self, 
model: str = "sentence-transformers/all-MiniLM-L6-v2"): - self.model = AutoModel.from_pretrained(model, local_files_only=True) - self.model.eval() - - # self.tokenizer = AutoTokenizer.from_pretrained(model) - self.tokenizer = AutoTokenizer.from_pretrained(model, local_files_only=True) - if not self.tokenizer.pad_token: - self.tokenizer.pad_token = "[PAD]" + def __init__(self, model: str): + self.model = SentenceTransformer(model) try: self.__dimension = self.model.config.hidden_size - except Exception: # pylint: disable=W0703 - from transformers import AutoConfig # pylint: disable=C0415 + except Exception: + from transformers import AutoConfig config = AutoConfig.from_pretrained(model) self.__dimension = config.hidden_size - def to_embeddings(self, data, **_): + def to_embeddings(self, data: str, **_): """Generate embedding given text input :param data: text in string. @@ -36,24 +21,11 @@ def to_embeddings(self, data, **_): :return: a text embedding in shape of (dim,). """ - if not isinstance(data, list): - data = [data] - inputs = self.tokenizer( - data, padding=True, truncation=True, return_tensors="pt" - ) - outs = self.model(**inputs).last_hidden_state - emb = self.post_proc(outs, inputs).squeeze(0).detach().numpy() - return np.array(emb).astype("float32") - def post_proc(self, token_embeddings, inputs): - attention_mask = inputs["attention_mask"] - input_mask_expanded = ( - attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() - ) - sentence_embs = torch.sum( - token_embeddings * input_mask_expanded, 1 - ) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) - return sentence_embs + if not data: + raise ValueError("No data provided for embedding.") + embeddings = self.model.encode(data) + return embeddings[0] if len(data) == 1 else embeddings @property def dimension(self): diff --git a/modelcache/embedding/mpnet_base.py b/modelcache/embedding/mpnet_base.py deleted file mode 100644 index b272c5c..0000000 --- a/modelcache/embedding/mpnet_base.py +++ /dev/null @@ -1,17 +0,0 @@ -from sentence_transformers import SentenceTransformer - -class MPNet_Base: - def __init__(self): - self.dimension = 768 - self.model = SentenceTransformer('sentence-transformers/all-mpnet-base-v2') - - def to_embeddings(self, *args, **kwargs): - if not args: - raise ValueError("No data provided for embedding.") - embeddings = self.model.encode(args) - return embeddings[0] if len(args) == 1 else embeddings - - def similarity(self, a, b): - if not a or not b: - raise ValueError("Both inputs must be non-empty for similarity calculation.") - return self.model.similarity(a, b) diff --git a/modelcache/embedding/string_text.py b/modelcache/embedding/string_text.py deleted file mode 100644 index 4fd08e7..0000000 --- a/modelcache/embedding/string_text.py +++ /dev/null @@ -1,5 +0,0 @@ -# -*- coding: utf-8 -*- - - -def to_embeddings(data, **_): - return data diff --git a/modelcache/manager/__init__.py b/modelcache/manager/__init__.py index 59a5ffe..40a96af 100644 --- a/modelcache/manager/__init__.py +++ b/modelcache/manager/__init__.py @@ -1,5 +1 @@ # -*- coding: utf-8 -*- -from modelcache.manager.scalar_data import CacheBase -from modelcache.manager.vector_data import VectorBase -from modelcache.manager.object_data import ObjectBase -from modelcache.manager.factory import get_data_manager diff --git a/modelcache/manager/data_manager.py b/modelcache/manager/data_manager.py index 3b20fd3..13d29b1 100644 --- a/modelcache/manager/data_manager.py +++ b/modelcache/manager/data_manager.py @@ -6,26 +6,15 @@ import 
numpy as np import cachetools from abc import abstractmethod, ABCMeta -from typing import List, Any, Optional, Union - -from numpy import ndarray - -from modelcache.manager.scalar_data.base import ( - CacheStorage, - CacheData, - DataType, - Answer, - Question -) +from typing import List, Any, Optional +from typing import Union, Callable +from modelcache.manager.scalar_data.base import CacheStorage,CacheData,DataType,Answer,Question from modelcache.utils.error import CacheError, ParamError -from modelcache.manager.vector_data.base import VectorBase, VectorData +from modelcache.manager.vector_data.base import VectorStorage, VectorData from modelcache.manager.object_data.base import ObjectBase -from modelcache.manager.eviction import EvictionBase -from modelcache.manager.eviction_manager import EvictionManager from modelcache.manager.eviction.memory_cache import MemoryCacheEviction from modelcache.utils.log import modelcache_log -NORMALIZE = True class DataManager(metaclass=ABCMeta): """DataManager manage the cache data, including save and search""" @@ -61,9 +50,11 @@ def search(self, embedding_data, **kwargs): def delete(self, id_list, **kwargs): pass + @abstractmethod def truncate(self, model_name): pass + @abstractmethod def flush(self): pass @@ -71,6 +62,30 @@ def flush(self): def close(self): pass + @staticmethod + def get( + cache_base: Union[CacheStorage, str] = None, + vector_base: Union[VectorStorage, str] = None, + object_base: Union[ObjectBase, str] = None, + max_size: int = 3, + clean_size: int = 1, + eviction: str = "ARC", + data_path: str = "data_map.txt", + get_data_container: Callable = None, + normalize: bool = True + ): + if not cache_base and not vector_base: + return MapDataManager(data_path, max_size, get_data_container) + + if isinstance(cache_base, str): + cache_base = CacheStorage.get(name=cache_base) + if isinstance(vector_base, str): + vector_base = VectorStorage.get(name=vector_base) + if isinstance(object_base, str): + object_base = ObjectBase.get(name=object_base) + assert cache_base and vector_base + return SSDataManager(cache_base, vector_base, object_base, max_size, clean_size,normalize, eviction) + class MapDataManager(DataManager): def __init__(self, data_path, max_size, get_data_container=None): @@ -149,10 +164,11 @@ class SSDataManager(DataManager): def __init__( self, s: CacheStorage, - v: VectorBase, + v: VectorStorage, o: Optional[ObjectBase], max_size, clean_size, + normalize: bool, policy="LRU", ): self.max_size = max_size @@ -160,6 +176,7 @@ def __init__( self.s = s self.v = v self.o = o + self.normalize = normalize # added self.eviction_base = MemoryCacheEviction( @@ -208,7 +225,7 @@ def import_data( raise ParamError("Make sure that all parameters have the same length") cache_datas = [] - if NORMALIZE: + if self.normalize: embedding_datas = [ normalize(embedding_data) for embedding_data in embedding_datas ] @@ -251,7 +268,7 @@ def hit_cache_callback(self, res_data, **kwargs): def search(self, embedding_data, **kwargs): model = kwargs.pop("model", None) - if NORMALIZE: + if self.normalize: embedding_data = normalize(embedding_data) top_k = kwargs.get("top_k", -1) return self.v.search(data=embedding_data, top_k=top_k, model=model) @@ -330,8 +347,3 @@ def close(self): self.s.close() self.v.close() - -# if __name__ == '__main__': -# from modelcache.manager import CacheBase, VectorBase, get_data_manager -# data_manager = get_data_manager(CacheBase('mysql'), VectorBase('milvus', dimension=128)) -# data_manager.save('hello', 'hi', 
np.random.random((128,)).astype('float32'), model='gptcode_6b') diff --git a/modelcache/manager/eviction/__init__.py b/modelcache/manager/eviction/__init__.py index 8ca7a3d..633f866 100644 --- a/modelcache/manager/eviction/__init__.py +++ b/modelcache/manager/eviction/__init__.py @@ -1,10 +1,2 @@ # -*- coding: utf-8 -*- -from modelcache.utils.lazy_import import LazyImport -eviction_manager = LazyImport( - "eviction_manager", globals(), "modelcache.manager.eviction.manager" -) - - -def EvictionBase(name: str, **kwargs): - return eviction_manager.EvictionBase.get(name, **kwargs) diff --git a/modelcache/manager/eviction/manager.py b/modelcache/manager/eviction/manager.py deleted file mode 100644 index 61579f0..0000000 --- a/modelcache/manager/eviction/manager.py +++ /dev/null @@ -1,25 +0,0 @@ -# -*- coding: utf-8 -*- -from typing import Callable, List, Any -from modelcache.utils.error import NotFoundError - - -class EvictionBase: - """ - EvictionBase to evict the cache data. - """ - - def __init__(self): - raise EnvironmentError( - "EvictionBase is designed to be instantiated, " - "please using the `EvictionBase.get(name, policy, maxsize, clean_size)`." - ) - - @staticmethod - def get(name: str, policy: str, maxsize: int, clean_size: int, on_evict: Callable[[List[Any]], None], **kwargs): - if name in "memory": - from modelcache.manager.eviction.memory_cache import MemoryCacheEviction - - eviction_base = MemoryCacheEviction(policy, maxsize, clean_size, on_evict, **kwargs) - else: - raise NotFoundError("eviction base", name) - return eviction_base diff --git a/modelcache/manager/eviction/memory_cache.py b/modelcache/manager/eviction/memory_cache.py index c6272f6..8561075 100644 --- a/modelcache/manager/eviction/memory_cache.py +++ b/modelcache/manager/eviction/memory_cache.py @@ -28,6 +28,8 @@ def __init__(self, policy: str, maxsize: int, clean_size: int, on_evict: Callabl self.kwargs = kwargs def create_cache(self, model: str): + + ################# Not integrated with on_evict yet ####################### if self._policy == "LRU": cache = cachetools.LRUCache(maxsize=self.maxsize, **self.kwargs) elif self._policy == "LFU": @@ -36,6 +38,8 @@ def create_cache(self, model: str): cache = cachetools.FIFOCache(maxsize=self.maxsize, **self.kwargs) elif self._policy == "RR": cache = cachetools.RRCache(maxsize=self.maxsize, **self.kwargs) + ########################################################################### + elif self._policy == "WTINYLFU": cache = W2TinyLFU(maxsize=self.maxsize, on_evict=lambda x: self.on_evict(x,model=model)) elif self._policy == "ARC": diff --git a/modelcache/manager/factory.py b/modelcache/manager/factory.py deleted file mode 100644 index b6d5b08..0000000 --- a/modelcache/manager/factory.py +++ /dev/null @@ -1,27 +0,0 @@ -# -*- coding: utf-8 -*- -from typing import Union, Callable -from modelcache.manager import CacheBase, VectorBase, ObjectBase -from modelcache.manager.data_manager import SSDataManager, MapDataManager - - -def get_data_manager( - cache_base: Union[CacheBase, str] = None, - vector_base: Union[VectorBase, str] = None, - object_base: Union[ObjectBase, str] = None, - max_size: int = 3, - clean_size: int = 1, - eviction: str = "ARC", - data_path: str = "data_map.txt", - get_data_container: Callable = None, -): - if not cache_base and not vector_base: - return MapDataManager(data_path, max_size, get_data_container) - - if isinstance(cache_base, str): - cache_base = CacheBase(name=cache_base) - if isinstance(vector_base, str): - vector_base = 
VectorBase(name=vector_base) - if isinstance(object_base, str): - object_base = ObjectBase(name=object_base) - assert cache_base and vector_base - return SSDataManager(cache_base, vector_base, object_base, max_size, clean_size, eviction) diff --git a/modelcache/manager/object_data/__init__.py b/modelcache/manager/object_data/__init__.py index f1186c0..633f866 100644 --- a/modelcache/manager/object_data/__init__.py +++ b/modelcache/manager/object_data/__init__.py @@ -1,9 +1,2 @@ # -*- coding: utf-8 -*- -from modelcache.utils.lazy_import import LazyImport -object_manager = LazyImport( - "object_manager", globals(), "modelcache.manager.object_data.manager" -) - -def ObjectBase(name: str, **kwargs): - return object_manager.ObjectBase.get(name, **kwargs) diff --git a/modelcache/manager/object_data/base.py b/modelcache/manager/object_data/base.py index 9ed00cf..41466f9 100644 --- a/modelcache/manager/object_data/base.py +++ b/modelcache/manager/object_data/base.py @@ -12,10 +12,6 @@ class ObjectBase(ABC): def put(self, obj: Any) -> str: pass - @abstractmethod - def get(self, obj: str) -> Any: - pass - @abstractmethod def get_access_link(self, obj: str) -> str: pass @@ -23,3 +19,7 @@ def get_access_link(self, obj: str) -> str: @abstractmethod def delete(self, to_delete: List[str]): pass + + @staticmethod + def get(name: str) -> Any: + pass diff --git a/modelcache/manager/scalar_data/__init__.py b/modelcache/manager/scalar_data/__init__.py index b63c430..40a96af 100644 --- a/modelcache/manager/scalar_data/__init__.py +++ b/modelcache/manager/scalar_data/__init__.py @@ -1,9 +1 @@ # -*- coding: utf-8 -*- -from modelcache.utils.lazy_import import LazyImport -scalar_manager = LazyImport( - "scalar_manager", globals(), "modelcache.manager.scalar_data.manager" -) - - -def CacheBase(name: str, **kwargs): - return scalar_manager.CacheBase.get(name, **kwargs) diff --git a/modelcache/manager/scalar_data/base.py b/modelcache/manager/scalar_data/base.py index 607db95..5ed898b 100644 --- a/modelcache/manager/scalar_data/base.py +++ b/modelcache/manager/scalar_data/base.py @@ -5,6 +5,9 @@ from enum import IntEnum import numpy as np +from modelcache.utils import import_sql_client +from modelcache.utils.error import NotFoundError + class DataType(IntEnum): STR = 0 @@ -107,7 +110,7 @@ def mark_deleted(self, keys): pass @abstractmethod - def model_deleted(self, model_name): + def model_deleted(self, model): pass @abstractmethod @@ -122,6 +125,7 @@ def get_ids(self, deleted=True): def count(self): pass + @abstractmethod def flush(self): pass @@ -133,3 +137,27 @@ def close(self): def batch_insert(self, all_data: List[CacheData]): pass + @abstractmethod + def update_hit_count_by_id(self, primary_id): + pass + + @staticmethod + def get(name, **kwargs): + if name in ["mysql", "oceanbase"]: + from modelcache.manager.scalar_data.sql_storage import SQLStorage + config = kwargs.get("config") + import_sql_client(name) + cache_base = SQLStorage(db_type=name, config=config) + elif name == 'sqlite': + SQL_URL = {"sqlite": "./sqlite.db"} + from modelcache.manager.scalar_data.sql_storage_sqlite import SQLStorage + sql_url = kwargs.get("sql_url", SQL_URL[name]) + cache_base = SQLStorage(db_type=name, url=sql_url) + elif name == 'elasticsearch': + from modelcache.manager.scalar_data.sql_storage_es import SQLStorage + config = kwargs.get("config") + cache_base = SQLStorage(db_type=name, config=config) + else: + raise NotFoundError("cache store", name) + return cache_base + diff --git a/modelcache/manager/scalar_data/manager.py 
b/modelcache/manager/scalar_data/manager.py deleted file mode 100644 index 8ff3aee..0000000 --- a/modelcache/manager/scalar_data/manager.py +++ /dev/null @@ -1,36 +0,0 @@ -# -*- coding: utf-8 -*- -from modelcache.utils import import_sql_client -from modelcache.utils.error import NotFoundError - -SQL_URL = {"sqlite": "./sqlite.db"} - - -class CacheBase: - """ - CacheBase to manager the cache storage. - """ - - def __init__(self): - raise EnvironmentError( - "CacheBase is designed to be instantiated, please using the `CacheBase.get(name)`." - ) - - @staticmethod - def get(name, **kwargs): - - if name in ["mysql", "oceanbase"]: - from modelcache.manager.scalar_data.sql_storage import SQLStorage - config = kwargs.get("config") - import_sql_client(name) - cache_base = SQLStorage(db_type=name, config=config) - elif name == 'sqlite': - from modelcache.manager.scalar_data.sql_storage_sqlite import SQLStorage - sql_url = kwargs.get("sql_url", SQL_URL[name]) - cache_base = SQLStorage(db_type=name, url=sql_url) - elif name == 'elasticsearch': - from modelcache.manager.scalar_data.sql_storage_es import SQLStorage - config = kwargs.get("config") - cache_base = SQLStorage(db_type=name, config=config) - else: - raise NotFoundError("cache store", name) - return cache_base diff --git a/modelcache/manager/scalar_data/sql_storage.py b/modelcache/manager/scalar_data/sql_storage.py index c339285..c24a024 100644 --- a/modelcache/manager/scalar_data/sql_storage.py +++ b/modelcache/manager/scalar_data/sql_storage.py @@ -12,6 +12,7 @@ class SQLStorage(CacheStorage): + def __init__( self, db_type: str = "mysql", @@ -280,3 +281,6 @@ def close(self): def count_answers(self): pass + + def flush(self): + pass \ No newline at end of file diff --git a/modelcache/manager/scalar_data/sql_storage_es.py b/modelcache/manager/scalar_data/sql_storage_es.py index 7e0184a..3787623 100644 --- a/modelcache/manager/scalar_data/sql_storage_es.py +++ b/modelcache/manager/scalar_data/sql_storage_es.py @@ -8,6 +8,7 @@ class SQLStorage(CacheStorage): + def __init__( self, db_type: str = "elasticsearch", @@ -193,3 +194,6 @@ def count_answers(self): query = {"query": {"match_all": {}}} response = self.client.count(index=self.ans_index, body=query) return response["count"] + + def flush(self): + pass diff --git a/modelcache/manager/scalar_data/sql_storage_sqlite.py b/modelcache/manager/scalar_data/sql_storage_sqlite.py index b7463c5..3b69979 100644 --- a/modelcache/manager/scalar_data/sql_storage_sqlite.py +++ b/modelcache/manager/scalar_data/sql_storage_sqlite.py @@ -199,3 +199,6 @@ def close(self): def count_answers(self): pass + + def flush(self): + pass diff --git a/modelcache/manager/vector_data/__init__.py b/modelcache/manager/vector_data/__init__.py index fdb98cb..40a96af 100644 --- a/modelcache/manager/vector_data/__init__.py +++ b/modelcache/manager/vector_data/__init__.py @@ -1,10 +1 @@ # -*- coding: utf-8 -*- -from modelcache.utils.lazy_import import LazyImport - -vector_manager = LazyImport( - "vector_manager", globals(), "modelcache.manager.vector_data.manager" -) - - -def VectorBase(name: str, **kwargs): - return vector_manager.VectorBase.get(name, **kwargs) diff --git a/modelcache/manager/vector_data/base.py b/modelcache/manager/vector_data/base.py index 7bdf12a..6742c94 100644 --- a/modelcache/manager/vector_data/base.py +++ b/modelcache/manager/vector_data/base.py @@ -4,6 +4,19 @@ from typing import List from dataclasses import dataclass +from modelcache.embedding import MetricType +from modelcache.utils.error import 
ParamError, NotFoundError + +TOP_K = 1 +FAISS_INDEX_PATH = "faiss.index" +DIMENSION = 0 +MILVUS_HOST = "localhost" +MILVUS_PORT = 19530 +MILVUS_USER = "" +MILVUS_PSW = "" +MILVUS_SECURE = False + +COLLECTION_NAME = "modelcache" @dataclass class VectorData: @@ -11,8 +24,7 @@ class VectorData: data: np.ndarray -class VectorBase(ABC): - """VectorBase: base vector store interface""" +class VectorStorage(ABC): @abstractmethod def mul_add(self, datas: List[VectorData], model=None): @@ -34,8 +46,107 @@ def delete(self, ids) -> bool: def rebuild_col(self, model): pass + @abstractmethod def flush(self): pass + @abstractmethod def close(self): pass + + @staticmethod + def get(name, **kwargs): + top_k = kwargs.get("top_k", TOP_K) + if name == "milvus": + from modelcache.manager.vector_data.milvus import Milvus + dimension = kwargs.get("dimension", DIMENSION) + milvus_config = kwargs.get("config") + check_dimension(dimension) + host = milvus_config.get('milvus', 'host') + port = milvus_config.get('milvus', 'port') + user = milvus_config.get('milvus', 'user') + password = milvus_config.get('milvus', 'password') + + metric_type = kwargs.get("metric_type",MetricType.COSINE) + secure = kwargs.get("secure", MILVUS_SECURE) + collection_name = kwargs.get("collection_name", COLLECTION_NAME) + index_params = kwargs.get("index_params", None) + search_params = kwargs.get("search_params", None) + local_mode = kwargs.get("local_mode", False) + local_data = kwargs.get("local_data", "./milvus_data") + vector_base = Milvus( + host=host, + port=port, + user=user, + password=password, + secure=secure, + collection_name=collection_name, + dimension=dimension, + top_k=top_k, + index_params=index_params, + search_params=search_params, + local_mode=local_mode, + local_data=local_data, + metric_type=metric_type + ) + elif name == "redis": + from modelcache.manager.vector_data.redis import RedisVectorStore + dimension = kwargs.get("dimension", DIMENSION) + check_dimension(dimension) + + redis_config = kwargs.get("config") + host = redis_config.get('redis', 'host') + port = redis_config.get('redis', 'port') + user = redis_config.get('redis', 'user') + password = redis_config.get('redis', 'password') + namespace = kwargs.get("namespace", "") + # collection_name = kwargs.get("collection_name", COLLECTION_NAME) + + vector_base = RedisVectorStore( + host=host, + port=port, + username=user, + password=password, + namespace=namespace, + top_k=top_k, + dimension=dimension, + ) + elif name == "faiss": + from modelcache.manager.vector_data.faiss import Faiss + + dimension = kwargs.get("dimension", DIMENSION) + index_path = kwargs.pop("index_path", FAISS_INDEX_PATH) + check_dimension(dimension) + vector_base = Faiss( + index_file_path=index_path, dimension=dimension, top_k=top_k + ) + elif name == "chromadb": + from modelcache.manager.vector_data.chroma import Chromadb + + chromadb_config = kwargs.get("config", None) + persist_directory = chromadb_config.get('chromadb','persist_directory') + + vector_base = Chromadb( + persist_directory=persist_directory, + top_k=top_k, + ) + elif name == "hnswlib": + from modelcache.manager.vector_data.hnswlib_store import Hnswlib + + dimension = kwargs.get("dimension", DIMENSION) + index_path = kwargs.pop("index_path", "./hnswlib_index.bin") + max_elements = kwargs.pop("max_elements", 100000) + VectorStorage.check_dimension(dimension) + vector_base = Hnswlib( + index_file_path=index_path, dimension=dimension, + top_k=top_k, max_elements=max_elements + ) + else: + raise NotFoundError("vector store", 
name) + return vector_base + + +def check_dimension(dimension): + if dimension <= 0: + raise ParamError(f"the dimension should be greater than zero, current value: {dimension}.") + diff --git a/modelcache/manager/vector_data/chroma.py b/modelcache/manager/vector_data/chroma.py index 446b354..b92ef3c 100644 --- a/modelcache/manager/vector_data/chroma.py +++ b/modelcache/manager/vector_data/chroma.py @@ -2,7 +2,7 @@ import numpy as np import logging -from modelcache.manager.vector_data.base import VectorBase, VectorData +from modelcache.manager.vector_data.base import VectorStorage, VectorData from modelcache.utils import import_chromadb, import_torch import_torch() @@ -11,7 +11,7 @@ import chromadb -class Chromadb(VectorBase): +class Chromadb(VectorStorage): def __init__( self, diff --git a/modelcache/manager/vector_data/faiss.py b/modelcache/manager/vector_data/faiss.py index 0f8445c..77e7617 100644 --- a/modelcache/manager/vector_data/faiss.py +++ b/modelcache/manager/vector_data/faiss.py @@ -2,13 +2,13 @@ import os from typing import List import numpy as np -from modelcache.manager.vector_data.base import VectorBase, VectorData +from modelcache.manager.vector_data.base import VectorStorage, VectorData from modelcache.utils import import_faiss import_faiss() import faiss # pylint: disable=C0413 -class Faiss(VectorBase): +class Faiss(VectorStorage): def __init__(self, index_file_path, dimension, top_k): self._index_file_path = index_file_path self._dimension = dimension diff --git a/modelcache/manager/vector_data/manager.py b/modelcache/manager/vector_data/manager.py deleted file mode 100644 index 2167fbc..0000000 --- a/modelcache/manager/vector_data/manager.py +++ /dev/null @@ -1,127 +0,0 @@ -# -*- coding: utf-8 -*- -from modelcache.utils.error import NotFoundError, ParamError - -TOP_K = 1 -FAISS_INDEX_PATH = "faiss.index" -DIMENSION = 0 -MILVUS_HOST = "localhost" -MILVUS_PORT = 19530 -MILVUS_USER = "" -MILVUS_PSW = "" -MILVUS_SECURE = False -MILVUS_INDEX_PARAMS = { - "metric_type": "L2", - "index_type": "HNSW", - "params": {"M": 8, "efConstruction": 64}, -} - -COLLECTION_NAME = "modelcache" - -MPNet_base = False # whether to use MPNet base model for embedding, if True, will use cosine similarity evaluation in milvus - - -class VectorBase: - """ - VectorBase to manager the vector base. - """ - - def __init__(self): - raise EnvironmentError( - "VectorBase is designed to be instantiated, please using the `VectorBase.get(name)`." - ) - - @staticmethod - def check_dimension(dimension): - if dimension <= 0: - raise ParamError( - f"the dimension should be greater than zero, current value: {dimension}." 
- ) - - @staticmethod - def get(name, **kwargs): - top_k = kwargs.get("top_k", TOP_K) - if name == "milvus": - from modelcache.manager.vector_data.milvus import Milvus - dimension = kwargs.get("dimension", DIMENSION) - milvus_config = kwargs.get("milvus_config") - VectorBase.check_dimension(dimension) - host = milvus_config.get('milvus', 'host') - port = milvus_config.get('milvus', 'port') - user = milvus_config.get('milvus', 'user') - password = milvus_config.get('milvus', 'password') - - secure = kwargs.get("secure", MILVUS_SECURE) - collection_name = kwargs.get("collection_name", COLLECTION_NAME) - index_params = kwargs.get("index_params", MILVUS_INDEX_PARAMS) - search_params = kwargs.get("search_params", None) - local_mode = kwargs.get("local_mode", False) - local_data = kwargs.get("local_data", "./milvus_data") - vector_base = Milvus( - host=host, - port=port, - user=user, - password=password, - secure=secure, - collection_name=collection_name, - dimension=dimension, - top_k=top_k, - index_params=index_params, - search_params=search_params, - local_mode=local_mode, - local_data=local_data - ) - elif name == "redis": - from modelcache.manager.vector_data.redis import RedisVectorStore - dimension = kwargs.get("dimension", DIMENSION) - VectorBase.check_dimension(dimension) - - redis_config = kwargs.get("redis_config") - host = redis_config.get('redis', 'host') - port = redis_config.get('redis', 'port') - user = redis_config.get('redis', 'user') - password = redis_config.get('redis', 'password') - namespace = kwargs.get("namespace", "") - # collection_name = kwargs.get("collection_name", COLLECTION_NAME) - - vector_base = RedisVectorStore( - host=host, - port=port, - username=user, - password=password, - namespace=namespace, - top_k=top_k, - dimension=dimension, - ) - elif name == "faiss": - from modelcache.manager.vector_data.faiss import Faiss - - dimension = kwargs.get("dimension", DIMENSION) - index_path = kwargs.pop("index_path", FAISS_INDEX_PATH) - VectorBase.check_dimension(dimension) - vector_base = Faiss( - index_file_path=index_path, dimension=dimension, top_k=top_k - ) - elif name == "chromadb": - from modelcache.manager.vector_data.chroma import Chromadb - - chromadb_config = kwargs.get("chromadb_config", None) - persist_directory = chromadb_config.get('chromadb','persist_directory') - - vector_base = Chromadb( - persist_directory=persist_directory, - top_k=top_k, - ) - elif name == "hnswlib": - from modelcache.manager.vector_data.hnswlib_store import Hnswlib - - dimension = kwargs.get("dimension", DIMENSION) - index_path = kwargs.pop("index_path", "./hnswlib_index.bin") - max_elements = kwargs.pop("max_elements", 100000) - VectorBase.check_dimension(dimension) - vector_base = Hnswlib( - index_file_path=index_path, dimension=dimension, - top_k=top_k, max_elements=max_elements - ) - else: - raise NotFoundError("vector store", name) - return vector_base diff --git a/modelcache/manager/vector_data/milvus.py b/modelcache/manager/vector_data/milvus.py index 1d04430..f5c7682 100644 --- a/modelcache/manager/vector_data/milvus.py +++ b/modelcache/manager/vector_data/milvus.py @@ -3,9 +3,11 @@ from typing import List from uuid import uuid4 import numpy as np + +from modelcache.embedding import MetricType from modelcache.utils import import_pymilvus from modelcache.utils.log import modelcache_log -from modelcache.manager.vector_data.base import VectorBase, VectorData +from modelcache.manager.vector_data.base import VectorStorage, VectorData import_pymilvus() @@ -21,19 +23,7 @@ ) -class 
Milvus(VectorBase): - SEARCH_PARAM = { - "IVF_FLAT": {"metric_type": "L2", "params": {"nprobe": 10}}, - "IVF_SQ8": {"metric_type": "L2", "params": {"nprobe": 10}}, - "IVF_PQ": {"metric_type": "L2", "params": {"nprobe": 10}}, - "HNSW": {"metric_type": "L2", "params": {"ef": 10}}, - "RHNSW_FLAT": {"metric_type": "L2", "params": {"ef": 10}}, - "RHNSW_SQ": {"metric_type": "L2", "params": {"ef": 10}}, - "RHNSW_PQ": {"metric_type": "L2", "params": {"ef": 10}}, - "IVF_HNSW": {"metric_type": "L2", "params": {"nprobe": 10, "ef": 10}}, - "ANNOY": {"metric_type": "L2", "params": {"search_k": 10}}, - "AUTOINDEX": {"metric_type": "L2", "params": {}}, - } +class Milvus(VectorStorage): def __init__( self, @@ -48,7 +38,8 @@ def __init__( index_params: dict = None, search_params: dict = None, local_mode: bool = False, - local_data: str = "./milvus_data" + local_data: str = "./milvus_data", + metric_type: MetricType = MetricType.COSINE, ): if dimension <= 0: raise ValueError( @@ -58,14 +49,27 @@ def __init__( self._local_data = local_data self.dimension = dimension self.top_k = top_k - self.index_params = index_params if self._local_mode: self._create_local(port, local_data) self._connect(host, port, user, password, secure) self.collection_name = collection_name - self.search_params = ( - search_params or self.SEARCH_PARAM[self.index_params["index_type"]] - ) + self.search_params = { + "IVF_FLAT": {"metric_type": metric_type.value, "params": {"nprobe": 10}}, + "IVF_SQ8": {"metric_type": metric_type.value, "params": {"nprobe": 10}}, + "IVF_PQ": {"metric_type": metric_type.value, "params": {"nprobe": 10}}, + "HNSW": {"metric_type": metric_type.value, "params": {"ef": 10}}, + "RHNSW_FLAT": {"metric_type": metric_type.value, "params": {"ef": 10}}, + "RHNSW_SQ": {"metric_type": metric_type.value, "params": {"ef": 10}}, + "RHNSW_PQ": {"metric_type": metric_type.value, "params": {"ef": 10}}, + "IVF_HNSW": {"metric_type": metric_type.value, "params": {"nprobe": 10, "ef": 10}}, + "ANNOY": {"metric_type": metric_type.value, "params": {"search_k": 10}}, + "AUTOINDEX": {"metric_type": metric_type.value, "params": {}}, + } + self.index_params ={ + "metric_type": metric_type.value, + "index_type": "HNSW", + "params": {"M": 16, "efConstruction": 64}, + } self.collections = dict() diff --git a/modelcache/manager/vector_data/redis.py b/modelcache/manager/vector_data/redis.py index afa1088..6e51511 100644 --- a/modelcache/manager/vector_data/redis.py +++ b/modelcache/manager/vector_data/redis.py @@ -6,7 +6,7 @@ from redis.commands.search.field import TagField, VectorField, NumericField from redis.client import Redis -from modelcache.manager.vector_data.base import VectorBase, VectorData +from modelcache.manager.vector_data.base import VectorStorage, VectorData from modelcache.utils import import_redis from modelcache.utils.log import modelcache_log from modelcache.utils.index_util import get_index_name @@ -14,7 +14,7 @@ import_redis() -class RedisVectorStore(VectorBase): +class RedisVectorStore(VectorStorage): def __init__( self, host: str = "localhost", diff --git a/modelcache/similarity_evaluation/__init__.py b/modelcache/similarity_evaluation/__init__.py index 22dff8f..633f866 100644 --- a/modelcache/similarity_evaluation/__init__.py +++ b/modelcache/similarity_evaluation/__init__.py @@ -1,11 +1,2 @@ # -*- coding: utf-8 -*- -from modelcache.similarity_evaluation.similarity_evaluation import SimilarityEvaluation -from modelcache.utils.lazy_import import LazyImport -exact_match = LazyImport( - "exact_match", globals(), 
"modelcache.similarity_evaluation.exact_match" -) - - -def ExactMatchEvaluation(): - return exact_match.ExactMatchEvaluation() diff --git a/modelcache/similarity_evaluation/similarity_evaluation.py b/modelcache/similarity_evaluation/base.py similarity index 100% rename from modelcache/similarity_evaluation/similarity_evaluation.py rename to modelcache/similarity_evaluation/base.py diff --git a/modelcache/similarity_evaluation/distance.py b/modelcache/similarity_evaluation/distance.py index 44ca595..281bcd0 100644 --- a/modelcache/similarity_evaluation/distance.py +++ b/modelcache/similarity_evaluation/distance.py @@ -1,7 +1,6 @@ # -*- coding: utf-8 -*- from typing import Tuple, Dict, Any -from modelcache.similarity_evaluation import SimilarityEvaluation - +from modelcache.similarity_evaluation.base import SimilarityEvaluation class SearchDistanceEvaluation(SimilarityEvaluation): def __init__(self, max_distance=4.0, positive=False): diff --git a/modelcache/similarity_evaluation/exact_match.py b/modelcache/similarity_evaluation/exact_match.py index 553bd59..e207387 100644 --- a/modelcache/similarity_evaluation/exact_match.py +++ b/modelcache/similarity_evaluation/exact_match.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- from typing import Tuple, Dict, Any -from modelcache.similarity_evaluation.similarity_evaluation import SimilarityEvaluation +from modelcache.similarity_evaluation.base import SimilarityEvaluation class ExactMatchEvaluation(SimilarityEvaluation): diff --git a/modelcache/utils/cache_func.py b/modelcache/utils/cache_func.py deleted file mode 100644 index 461f542..0000000 --- a/modelcache/utils/cache_func.py +++ /dev/null @@ -1,3 +0,0 @@ -# -*- coding: utf-8 -*- -def cache_all(*_, **__): - return True \ No newline at end of file diff --git a/modelcache/utils/time.py b/modelcache/utils/time.py index 7074b58..569af99 100644 --- a/modelcache/utils/time.py +++ b/modelcache/utils/time.py @@ -1,15 +1,14 @@ # -*- coding: utf-8 -*- import time -from modelcache import cache - -def time_cal(func, func_name=None, report_func=None): +def time_cal(func, func_name=None, report_func=None, **kwargs): + cache = kwargs.pop("cache_obj") def inner(*args, **kwargs): time_start = time.time() res = func(*args, **kwargs) delta_time = time.time() - time_start - if cache.config.log_time_func: - cache.config.log_time_func( + if cache.log_time_func: + cache.log_time_func( func.__name__ if func_name is None else func_name, delta_time ) if report_func is not None: From 4a20eafce8a11aee559f2695b9fe2c13ac676588 Mon Sep 17 00:00:00 2001 From: Yuval Roth Date: Wed, 11 Jun 2025 02:45:13 +0300 Subject: [PATCH 91/98] New feature: websocket4modelcache A websocket-based API for the ModelCache system. 
The goal of this API is to save the overhead of creating a new http connection for every request and allow faster querying --- requirements.txt | 1 + websocket4modelcache.py | 45 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 46 insertions(+) create mode 100644 websocket4modelcache.py diff --git a/requirements.txt b/requirements.txt index a84a988..19db268 100644 --- a/requirements.txt +++ b/requirements.txt @@ -20,3 +20,4 @@ snowflake-id==1.0.2 flagembedding==1.3.4 cryptography==45.0.2 sentence-transformers==4.1.0 +websockets==15.0.1 diff --git a/websocket4modelcache.py b/websocket4modelcache.py new file mode 100644 index 0000000..4541a91 --- /dev/null +++ b/websocket4modelcache.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +import asyncio +import websockets +import json +from modelcache.cache import Cache + +# Initialize the cache +cache = Cache.init("mysql", "milvus") + + +async def handle_client(websocket): + async for message in websocket: + # Parse JSON + try: + param_dict = json.loads(message) + except json.JSONDecodeError: + await websocket.send(json.dumps({"errorCode": 400, "errorDesc": "bad request"})) + continue + + request_id = param_dict.get("requestId") + request_payload = param_dict.get("payload") + if not request_id or not request_payload: + await websocket.send(json.dumps({"errorCode": 400, "errorDesc": "bad request"})) + continue + asyncio.create_task(process_and_respond(websocket, request_id, request_payload)) + + +async def process_and_respond(websocket,request_id, request_payload): + try: + result = cache.handle_request(request_payload) + await websocket.send(json.dumps({"requestId": request_id,"result": result})) + except Exception as e: + error_result = {"errorCode": 102, "errorDesc": str(e), "cacheHit": False, "delta_time": 0, "hit_query": '', + "answer": ''} + cache.save_query_resp(error_result, model='', query='', delta_time=0) + await websocket.send(json.dumps(error_result)) + + +async def main(): + print("WebSocket server starting on ws://0.0.0.0:5000") + async with websockets.serve(handle_client, "0.0.0.0", 5000): + await asyncio.Future() # Run forever + +if __name__ == "__main__": + asyncio.run(main()) From a37f92e87f57c98adfced0d2003e2fb4a36502a1 Mon Sep 17 00:00:00 2001 From: Yuval Roth Date: Thu, 12 Jun 2025 01:51:15 +0300 Subject: [PATCH 92/98] New feature: Embedding Dispatcher for parallel embedding Based on the multiprocessing module, provides true parallel embedding. The number of workers can be adjusted in the cache initialization. 
Access to the caching logic is now done asynchronously using asyncio module --- fastapi4modelcache.py | 75 +++++++----------- fastapi4modelcache_demo.py | 75 +++++++----------- flask4modelcache.py | 58 ++++++++------ flask4modelcache_demo.py | 58 ++++++++------ model/text2vec-base-chinese/logs.txt | 20 ++++- modelcache/adapter/adapter.py | 8 +- modelcache/adapter/adapter_insert.py | 13 ++- modelcache/adapter/adapter_query.py | 4 +- modelcache/cache.py | 52 +++++++----- modelcache/embedding/base.py | 21 ++--- modelcache/embedding/embedding_dispatcher.py | 71 +++++++++++++++++ modelcache/embedding/huggingface.py | 4 +- requirements.txt | 5 +- websocket4modelcache.py | 83 +++++++++++--------- websocket4modelcache_demo.py | 58 ++++++++++++++ 15 files changed, 393 insertions(+), 212 deletions(-) create mode 100644 modelcache/embedding/embedding_dispatcher.py create mode 100644 websocket4modelcache_demo.py diff --git a/fastapi4modelcache.py b/fastapi4modelcache.py index d3d39bf..b260eae 100644 --- a/fastapi4modelcache.py +++ b/fastapi4modelcache.py @@ -1,61 +1,46 @@ # -*- coding: utf-8 -*- +import asyncio +from contextlib import asynccontextmanager import uvicorn import json -from fastapi import FastAPI, Request, HTTPException +from fastapi.responses import JSONResponse +from fastapi import FastAPI, Request from modelcache.cache import Cache - -#创建一个FastAPI实例 -app = FastAPI() - -cache = Cache.init("mysql", "milvus") +from modelcache.embedding import EmbeddingModel + +@asynccontextmanager +async def lifespan(app: FastAPI): + global cache + cache, _ = await Cache.init( + sql_storage="mysql", + vector_storage="milvus", + embedding_model=EmbeddingModel.HUGGINGFACE_ALL_MPNET_BASE_V2, + embedding_workers_num=2 + ) + yield + +app = FastAPI(lifespan=lifespan) +cache: Cache = None @app.get("/welcome") async def first_fastapi(): return "hello, modelcache!" 
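The lifespan hook above now builds the cache through the awaitable initializer, which also returns the event loop it was created on. A minimal sketch of driving the same API outside FastAPI, assuming the lightweight sqlite/faiss backends; the request fields below are illustrative rather than normative:

import asyncio
from modelcache.cache import Cache
from modelcache.embedding import EmbeddingModel

async def main():
    cache, _loop = await Cache.init(
        sql_storage="sqlite",
        vector_storage="faiss",
        embedding_model=EmbeddingModel.HUGGINGFACE_ALL_MPNET_BASE_V2,
        embedding_workers_num=1,
    )
    # Illustrative query request; field names follow the HTTP examples used elsewhere.
    result = await cache.handle_request({
        "type": "query",
        "scope": {"model": "demo-model"},
        "query": [{"role": "user", "content": "hello"}],
    })
    print(result)

if __name__ == "__main__":
    asyncio.run(main())
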
- @app.post("/modelcache") async def user_backend(request: Request): - try: - raw_body = await request.body() - # 解析字符串为JSON对象 - if isinstance(raw_body, bytes): - raw_body = raw_body.decode("utf-8") - if isinstance(raw_body, str): - try: - # 尝试将字符串解析为JSON对象 - request_data = json.loads(raw_body) - except json.JSONDecodeError as e: - # 如果无法解析,返回格式错误 - result = {"errorCode": 101, "errorDesc": str(e), "cacheHit": False, "delta_time": 0, "hit_query": '', - "answer": ''} - cache.save_query_info(result, model='', query='', delta_time_log=0) - raise HTTPException(status_code=101, detail="Invalid JSON format") - else: - request_data = raw_body - # 确保request_data是字典对象 - if isinstance(request_data, str): - try: - request_data = json.loads(request_data) - except json.JSONDecodeError: - raise HTTPException(status_code=101, detail="Invalid JSON format") - - return cache.handle_request(request_data) + try: + request_data = await request.json() + except Exception: + result = {"errorCode": 400, "errorDesc": "bad request", "cacheHit": False, "delta_time": 0, "hit_query": '', "answer": ''} + return JSONResponse(status_code=400, content=result) + try: + return await cache.handle_request(request_data) except Exception as e: - request_data = raw_body if 'raw_body' in locals() else None - result = { - "errorCode": 103, - "errorDesc": str(e), - "cacheHit": False, - "delta_time": 0, - "hit_query": '', - "answer": '', - "para_dict": request_data - } - return result + result = {"errorCode": 500, "errorDesc": str(e), "cacheHit": False, "delta_time": 0, "hit_query": '', "answer": ''} + cache.save_query_resp(result, model='', query='', delta_time=0) + return JSONResponse(status_code=500, content=result) -# TODO: 可以修改为在命令行中使用`uvicorn your_module_name:app --host 0.0.0.0 --port 5000 --reload`的命令启动 if __name__ == '__main__': - uvicorn.run(app, host='0.0.0.0', port=5000) \ No newline at end of file + uvicorn.run(app, host='0.0.0.0', port=5000, loop="asyncio", http="httptools") diff --git a/fastapi4modelcache_demo.py b/fastapi4modelcache_demo.py index 84058fa..c1f6a3f 100644 --- a/fastapi4modelcache_demo.py +++ b/fastapi4modelcache_demo.py @@ -1,14 +1,26 @@ # -*- coding: utf-8 -*- +import asyncio +from contextlib import asynccontextmanager import uvicorn import json -from fastapi import FastAPI, Request, HTTPException - +from fastapi.responses import JSONResponse +from fastapi import FastAPI, Request from modelcache.cache import Cache - -# 创建一个FastAPI实例 -app = FastAPI() - -cache = Cache.init("sqlite", "faiss") +from modelcache.embedding import EmbeddingModel + +@asynccontextmanager +async def lifespan(app: FastAPI): + global cache + cache, _ = await Cache.init( + sql_storage="sqlite", + vector_storage="faiss", + embedding_model=EmbeddingModel.HUGGINGFACE_ALL_MPNET_BASE_V2, + embedding_workers_num=2 + ) + yield + +app = FastAPI(lifespan=lifespan) +cache: Cache = None @app.get("/welcome") async def first_fastapi(): @@ -16,46 +28,19 @@ async def first_fastapi(): @app.post("/modelcache") async def user_backend(request: Request): - try: - raw_body = await request.body() - # 解析字符串为JSON对象 - if isinstance(raw_body, bytes): - raw_body = raw_body.decode("utf-8") - if isinstance(raw_body, str): - try: - # 尝试将字符串解析为JSON对象 - request_data = json.loads(raw_body) - except json.JSONDecodeError as e: - # 如果无法解析,返回格式错误 - result = {"errorCode": 101, "errorDesc": str(e), "cacheHit": False, "delta_time": 0, "hit_query": '', - "answer": ''} - cache.save_query_info(result, model='', query='', delta_time_log=0) - raise HTTPException(status_code=101, 
detail="Invalid JSON format") - else: - request_data = raw_body - # 确保request_data是字典对象 - if isinstance(request_data, str): - try: - request_data = json.loads(request_data) - except json.JSONDecodeError: - raise HTTPException(status_code=101, detail="Invalid JSON format") - - return cache.handle_request(request_data) + try: + request_data = await request.json() + except Exception: + result = {"errorCode": 400, "errorDesc": "bad request", "cacheHit": False, "delta_time": 0, "hit_query": '', "answer": ''} + return JSONResponse(status_code=400, content=result) + try: + return await cache.handle_request(request_data) except Exception as e: - request_data = raw_body if 'raw_body' in locals() else None - result = { - "errorCode": 103, - "errorDesc": str(e), - "cacheHit": False, - "delta_time": 0, - "hit_query": '', - "answer": '', - "para_dict": request_data - } - return result + result = {"errorCode": 500, "errorDesc": str(e), "cacheHit": False, "delta_time": 0, "hit_query": '', "answer": ''} + cache.save_query_resp(result, model='', query='', delta_time=0) + return JSONResponse(status_code=500, content=result) -# TODO: 可以修改为在命令行中使用`uvicorn your_module_name:app --host 0.0.0.0 --port 5000 --reload`的命令启动 if __name__ == '__main__': - uvicorn.run(app, host='0.0.0.0', port=5000) \ No newline at end of file + uvicorn.run(app, host='0.0.0.0', port=5000, loop="asyncio", http="httptools") diff --git a/flask4modelcache.py b/flask4modelcache.py index 0ca949d..3d18518 100644 --- a/flask4modelcache.py +++ b/flask4modelcache.py @@ -1,34 +1,48 @@ # -*- coding: utf-8 -*- -from flask import Flask, request -import json +import asyncio + +from flask import Flask, request, jsonify from modelcache.cache import Cache +from modelcache.embedding import EmbeddingModel + -# 创建一个Flask实例 -app = Flask(__name__) +async def main(): -cache = Cache.init("mysql","milvus") + # 创建一个Flask实例 + app = Flask(__name__) -@app.route('/welcome') -def first_flask(): # 视图函数 - return 'hello, modelcache!' + cache,loop = await Cache.init( + sql_storage="mysql", + vector_storage="milvus", + embedding_model=EmbeddingModel.HUGGINGFACE_ALL_MPNET_BASE_V2, + embedding_workers_num=2 + ) + @app.route('/welcome') + def first_flask(): # 视图函数 + return 'hello, modelcache!' 
-@app.route('/modelcache', methods=['GET', 'POST']) -def user_backend(): - param_dict = {} - try: - if request.method == 'POST': + + @app.post('/modelcache') + def user_backend(): + try: param_dict = request.json - elif request.method == 'GET': - param_dict = request.args + except Exception: + result = {"errorCode": 400, "errorDesc": "bad request", "cacheHit": False, "delta_time": 0, "hit_query": '',"answer": ''} + return jsonify(result), 400 + + try: + result = asyncio.run_coroutine_threadsafe( + cache.handle_request(param_dict), loop + ).result() + return jsonify(result), 200 + except Exception as e: + result = {"errorCode": 500, "errorDesc": str(e), "cacheHit": False, "delta_time": 0, "hit_query": '',"answer": ''} + cache.save_query_resp(result, model='', query='', delta_time=0) + return jsonify(result), 500 - return json.dumps(cache.handle_request(param_dict)) - except Exception as e: - result = {"errorCode": 101, "errorDesc": str(e), "cacheHit": False, "delta_time": 0, "hit_query": '', - "answer": ''} - cache.save_query_resp(result, model='', query='', delta_time=0) - return json.dumps(result) + await asyncio.to_thread(app.run, host='0.0.0.0', port=5000) if __name__ == '__main__': - app.run(host='0.0.0.0', port=5000) + asyncio.run(main()) diff --git a/flask4modelcache_demo.py b/flask4modelcache_demo.py index e0d5cfb..4251f94 100644 --- a/flask4modelcache_demo.py +++ b/flask4modelcache_demo.py @@ -1,34 +1,48 @@ # -*- coding: utf-8 -*- -from flask import Flask, request -import json +import asyncio + +from flask import Flask, request, jsonify from modelcache.cache import Cache +from modelcache.embedding import EmbeddingModel + -# 创建一个Flask实例 -app = Flask(__name__) +async def main(): -cache = Cache.init("sqlite","faiss") + # 创建一个Flask实例 + app = Flask(__name__) -@app.route('/welcome') -def first_flask(): # 视图函数 - return 'hello, modelcache!' + cache,loop = await Cache.init( + sql_storage="sqlite", + vector_storage="faiss", + embedding_model=EmbeddingModel.HUGGINGFACE_ALL_MPNET_BASE_V2, + embedding_workers_num=2 + ) + @app.route('/welcome') + def first_flask(): # 视图函数 + return 'hello, modelcache!' 
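Because Flask handlers execute in worker threads while the cache coroutines live on an asyncio loop, both Flask entry points submit work with asyncio.run_coroutine_threadsafe and block on the returned concurrent future. A self-contained sketch of that bridging pattern (the coroutine and values are made up):

import asyncio
import threading

async def lookup(key: str) -> str:
    await asyncio.sleep(0.01)          # stand-in for async cache work
    return f"value-for-{key}"

loop = asyncio.new_event_loop()
threading.Thread(target=loop.run_forever, daemon=True).start()

# From a synchronous worker thread (e.g. a Flask request handler):
future = asyncio.run_coroutine_threadsafe(lookup("abc"), loop)
print(future.result(timeout=5))        # blocks this thread only, not the loop

loop.call_soon_threadsafe(loop.stop)
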
-@app.route('/modelcache', methods=['GET', 'POST']) -def user_backend(): - param_dict = {} - try: - if request.method == 'POST': + + @app.post('/modelcache') + def user_backend(): + try: param_dict = request.json - elif request.method == 'GET': - param_dict = request.args + except Exception: + result = {"errorCode": 400, "errorDesc": "bad request", "cacheHit": False, "delta_time": 0, "hit_query": '',"answer": ''} + return jsonify(result), 400 + + try: + result = asyncio.run_coroutine_threadsafe( + cache.handle_request(param_dict), loop + ).result() + return jsonify(result), 200 + except Exception as e: + result = {"errorCode": 500, "errorDesc": str(e), "cacheHit": False, "delta_time": 0, "hit_query": '',"answer": ''} + cache.save_query_resp(result, model='', query='', delta_time=0) + return jsonify(result), 500 - return json.dumps(cache.handle_request(param_dict)) - except Exception as e: - result = {"errorCode": 101, "errorDesc": str(e), "cacheHit": False, "delta_time": 0, "hit_query": '', - "answer": ''} - cache.save_query_resp(result, model='', query='', delta_time=0) - return json.dumps(result) + await asyncio.to_thread(app.run, host='0.0.0.0', port=5000) if __name__ == '__main__': - app.run(host='0.0.0.0', port=5000) + asyncio.run(main()) diff --git a/model/text2vec-base-chinese/logs.txt b/model/text2vec-base-chinese/logs.txt index 8b13789..1f2f0a8 100644 --- a/model/text2vec-base-chinese/logs.txt +++ b/model/text2vec-base-chinese/logs.txt @@ -1 +1,19 @@ - +Epoch:0 Valid| corr: 0.794410 +Epoch:0 Valid| corr: 0.691819 +Epoch:1 Valid| corr: 0.722749 +Epoch:2 Valid| corr: 0.735054 +Epoch:3 Valid| corr: 0.738295 +Epoch:4 Valid| corr: 0.739411 +Test | corr: 0.679971 +Epoch:0 Valid| corr: 0.817416 +Epoch:1 Valid| corr: 0.832376 +Epoch:2 Valid| corr: 0.842308 +Epoch:3 Valid| corr: 0.843520 +Epoch:4 Valid| corr: 0.841837 +Test | corr: 0.793495 +Epoch:0 Valid| corr: 0.814648 +Epoch:1 Valid| corr: 0.831609 +Epoch:2 Valid| corr: 0.841678 +Epoch:3 Valid| corr: 0.842387 +Epoch:4 Valid| corr: 0.841435 +Test | corr: 0.794840 diff --git a/modelcache/adapter/adapter.py b/modelcache/adapter/adapter.py index 204841b..d62278d 100644 --- a/modelcache/adapter/adapter.py +++ b/modelcache/adapter/adapter.py @@ -10,11 +10,11 @@ class ChatCompletion(object): """Openai ChatCompletion Wrapper""" @classmethod - def create_query(cls, *args, **kwargs): + async def create_query(cls, *args, **kwargs): def cache_data_convert(cache_data, cache_query): return construct_resp_from_cache(cache_data, cache_query) try: - return adapt_query( + return await adapt_query( cache_data_convert, *args, **kwargs @@ -24,9 +24,9 @@ def cache_data_convert(cache_data, cache_query): return str(e) @classmethod - def create_insert(cls, *args, **kwargs): + async def create_insert(cls, *args, **kwargs): try: - return adapt_insert( + return await adapt_insert( *args, **kwargs ) diff --git a/modelcache/adapter/adapter_insert.py b/modelcache/adapter/adapter_insert.py index a507ee1..b4d1f6a 100644 --- a/modelcache/adapter/adapter_insert.py +++ b/modelcache/adapter/adapter_insert.py @@ -1,9 +1,11 @@ # -*- coding: utf-8 -*- +import asyncio + from modelcache.utils.error import NotInitError from modelcache.utils.time import time_cal -def adapt_insert(*args, **kwargs): +async def adapt_insert(*args, **kwargs): chat_cache = kwargs.pop("cache_obj") model = kwargs.pop("model", None) require_object_store = kwargs.pop("require_object_store", False) @@ -13,7 +15,8 @@ def adapt_insert(*args, **kwargs): chat_info = kwargs.pop("chat_info", []) 
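The rest of this hunk (below) collects one embedding future per chat row and awaits them together with asyncio.gather. A generic sketch of that fan-out idea using a process pool — this is not the repository's EmbeddingDispatcher, and the embed function is only a stand-in:

import asyncio
from concurrent.futures import ProcessPoolExecutor

def embed(text: str):
    # stand-in for a real embedding model; returns a fake one-dimensional vector
    return [float(len(text))]

async def main():
    loop = asyncio.get_running_loop()
    with ProcessPoolExecutor(max_workers=2) as pool:
        futures = [loop.run_in_executor(pool, embed, q) for q in ("a", "bb", "ccc")]
        vectors = await asyncio.gather(*futures)
    print(vectors)

if __name__ == "__main__":
    asyncio.run(main())
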
pre_embedding_data_list = [] - embedding_data_list = [] + embedding_futures_list = [] + # embedding_data_list = [] llm_data_list = [] for row in chat_info: @@ -24,13 +27,15 @@ def adapt_insert(*args, **kwargs): ) pre_embedding_data_list.append(pre_embedding_data) llm_data_list.append(row['answer']) - embedding_data = time_cal( + embedding_future = time_cal( chat_cache.embedding_func, func_name="embedding", report_func=chat_cache.report.embedding, cache_obj=chat_cache )(pre_embedding_data) - embedding_data_list.append(embedding_data) + embedding_futures_list.append(embedding_future) + + embedding_data_list = await asyncio.gather(*embedding_futures_list) chat_cache.data_manager.save( pre_embedding_data_list, diff --git a/modelcache/adapter/adapter_query.py b/modelcache/adapter/adapter_query.py index 2e56cc2..2eb8fa4 100644 --- a/modelcache/adapter/adapter_query.py +++ b/modelcache/adapter/adapter_query.py @@ -6,7 +6,7 @@ USE_RERANKER = False # 如果为 True 则启用 reranker,否则使用原有逻辑 -def adapt_query(cache_data_convert, *args, **kwargs): +async def adapt_query(cache_data_convert, *args, **kwargs): chat_cache = kwargs.pop("cache_obj") scope = kwargs.pop("scope") model = scope['model'] @@ -17,7 +17,7 @@ def adapt_query(cache_data_convert, *args, **kwargs): extra_param=context.get("pre_embedding_func", None), prompts=chat_cache.prompts, ) - embedding_data = time_cal( + embedding_data = await time_cal( chat_cache.embedding_func, func_name="embedding", report_func=chat_cache.report.embedding, diff --git a/modelcache/cache.py b/modelcache/cache.py index 8144138..16c744d 100644 --- a/modelcache/cache.py +++ b/modelcache/cache.py @@ -1,12 +1,15 @@ # -*- coding: utf-8 -*- +import asyncio import atexit import json import logging import time -from typing import Callable, Optional, List +from asyncio import AbstractEventLoop +from typing import Callable, Optional, List, Any, Coroutine from modelcache.adapter import adapter +from modelcache.embedding.embedding_dispatcher import EmbeddingDispatcher from modelcache.utils.model_filter import model_blacklist_filter -from concurrent.futures import ThreadPoolExecutor +from concurrent.futures import ThreadPoolExecutor, Future import configparser from modelcache.embedding.base import BaseEmbedding, EmbeddingModel, MetricType from modelcache.manager.scalar_data.sql_storage import SQLStorage @@ -41,10 +44,11 @@ def __init__( data_manager: DataManager, query_pre_embedding_func: Callable, insert_pre_embedding_func: Callable, - embedding_func: Callable, + embedding_func: Callable[[str], Future], report: Report, # TODO: figure out why this is needed similarity_evaluation: Optional[SimilarityEvaluation], post_process_messages_func: Callable, + similarity_threshold: float = 0.95, similarity_threshold_long: float = 0.95, prompts: Optional[List[str]] = None, @@ -82,7 +86,7 @@ def save_query_info(self,result, model, query, delta_time_log): self.data_manager.save_query_resp(result, model=model, query=json.dumps(query, ensure_ascii=False), delta_time=delta_time_log) - def handle_request(self, param_dict: dict): + async def handle_request(self, param_dict: dict): # param parsing try: request_type = param_dict.get("type") @@ -112,9 +116,9 @@ def handle_request(self, param_dict: dict): # handle request if request_type == 'query': - return self.handle_query(model, query) + return await self.handle_query(model, query) elif request_type == 'insert': - return self.handle_insert(chat_info, model) + return await self.handle_insert(chat_info, model) elif request_type == 'remove': return 
self.handle_remove(model, param_dict) elif request_type == 'register': @@ -151,10 +155,10 @@ def handle_remove(self, model, param_dict): result = {"errorCode": 402, "errorDesc": "", "response": response, "writeStatus": "exception"} return result - def handle_insert(self, chat_info, model): + async def handle_insert(self, chat_info, model): try: try: - response = adapter.ChatCompletion.create_insert( + response = await adapter.ChatCompletion.create_insert( model=model, chat_info=chat_info, cache_obj=self @@ -170,10 +174,10 @@ def handle_insert(self, chat_info, model): except Exception as e: return {"errorCode": 303, "errorDesc": str(e), "writeStatus": "exception"} - def handle_query(self, model, query): + async def handle_query(self, model, query): try: start_time = time.time() - response = adapter.ChatCompletion.create_query( + response = await adapter.ChatCompletion.create_query( scope={"model": model}, query=query, cache_obj=self @@ -203,7 +207,12 @@ def flush(self): self.data_manager.flush() @staticmethod - def init(sql_storage: str, vector_storage: str) -> 'Cache': + async def init( + sql_storage: str, + vector_storage: str, + embedding_model: EmbeddingModel, + embedding_workers_num: int + ) -> tuple['Cache' , AbstractEventLoop]: #================= configurations for databases ===================# sql_config = configparser.ConfigParser() @@ -234,9 +243,15 @@ def init(sql_storage: str, vector_storage: str) -> 'Cache': #=============== model-specific configuration =====================# - embedding_model = EmbeddingModel.HUGGINGFACE - model_path = "sentence-transformers/all-mpnet-base-v2" - base_embedding = BaseEmbedding.get(embedding_model, model_path=model_path) + event_loop = asyncio.get_running_loop() + model_path = embedding_model.value['model_path'] + dimension = embedding_model.value['dimension'] + + if model_path is None or dimension is None: + modelcache_log.error(f"Please set the model_path and dimension for {embedding_model} in modelcache/embedding/base.py.") + raise CacheError(f"Please set the model_path and dimension for {embedding_model} in modelcache/embedding/base.py.") + + embedding_dispatcher = EmbeddingDispatcher(embedding_model, model_path, event_loop, embedding_workers_num) #=== These will be used to initialize the cache ===# query_pre_embedding_func: Callable = None @@ -250,7 +265,7 @@ def init(sql_storage: str, vector_storage: str) -> 'Cache': #==================================================# # switching based on embedding_model - if embedding_model == EmbeddingModel.HUGGINGFACE: + if embedding_model == EmbeddingModel.HUGGINGFACE_ALL_MPNET_BASE_V2: query_pre_embedding_func = query_with_role insert_pre_embedding_func = query_with_role post_process_messages_func = first @@ -281,7 +296,7 @@ def init(sql_storage: str, vector_storage: str) -> 'Cache': SQLStorage.get(sql_storage, config=sql_config), VectorStorage.get( name=vector_storage, - dimension=base_embedding.dimension, + dimension=dimension, config=vector_config, metric_type=similarity_metric_type, ), @@ -290,7 +305,6 @@ def init(sql_storage: str, vector_storage: str) -> 'Cache': normalize=normalize, ) - #================== Cache Initialization ====================# cache = Cache( @@ -298,7 +312,7 @@ def init(sql_storage: str, vector_storage: str) -> 'Cache': similarity_metric_type = similarity_metric_type, data_manager = data_manager, report = Report(), - embedding_func = base_embedding.to_embeddings, + embedding_func = embedding_dispatcher.embed, query_pre_embedding_func = query_pre_embedding_func, 
insert_pre_embedding_func = insert_pre_embedding_func, similarity_evaluation = similarity_evaluation, @@ -308,4 +322,4 @@ def init(sql_storage: str, vector_storage: str) -> 'Cache': prompts = None, log_time_func = None, ) - return cache \ No newline at end of file + return cache, event_loop \ No newline at end of file diff --git a/modelcache/embedding/base.py b/modelcache/embedding/base.py index 3cd5936..aafdc97 100644 --- a/modelcache/embedding/base.py +++ b/modelcache/embedding/base.py @@ -17,14 +17,17 @@ class EmbeddingModel(Enum): """ Enum for different embedding models. """ - HUGGINGFACE = "huggingface" - DATA2VEC_AUDIO = "data2vec_audio" - LLM_EMB2VEC_AUDIO = "llmEmb2vec_audio" - FASTTEXT = "fasttext" - PADDLE_NLP = "paddlenlp" - TIMM = "timm" - HUGGINGFACE_TEI = "huggingface_tei" - BGE_M3 = "bge_m3" + # todo: fill in the dimension and model_path for each embedding model as needed + HUGGINGFACE_ALL_MPNET_BASE_V2 = {"dimension":768, "model_path":"sentence-transformers/all-mpnet-base-v2"} + HUGGINGFACE_ALL_MINILM_L6_V2 = {"dimension":384, "model_path":"sentence-transformers/all-MiniLM-L6-v2"} + HUGGINGFACE_ALL_MINILM_L12_V2 = {"dimension":384, "model_path":"sentence-transformers/all-MiniLM-L12-v2"} + DATA2VEC_AUDIO = {"dimension":None, "model_path":"model/text2vec-base-chinese/"} + LLM_EMB2VEC_AUDIO = {"dimension":None, "model_path":None} + FASTTEXT = {"dimension":None, "model_path":None} + PADDLE_NLP = {"dimension":None, "model_path":None} + TIMM = {"dimension":None, "model_path":None} + HUGGINGFACE_TEI = {"dimension":None, "model_path":None} + BGE_M3 = {"dimension":None, "model_path":None} class MetricType(Enum): @@ -61,7 +64,7 @@ def get(model:EmbeddingModel, **kwargs): :rtype: BaseEmbedding :raises ValueError: If the specified model type is not supported. 
""" - if model == EmbeddingModel.HUGGINGFACE: + if model == EmbeddingModel.HUGGINGFACE_ALL_MPNET_BASE_V2: model_path = kwargs.pop("model_path","sentence-transformers/all-mpnet-base-v2") return huggingface.Huggingface(model_path) diff --git a/modelcache/embedding/embedding_dispatcher.py b/modelcache/embedding/embedding_dispatcher.py new file mode 100644 index 0000000..358cff1 --- /dev/null +++ b/modelcache/embedding/embedding_dispatcher.py @@ -0,0 +1,71 @@ +import multiprocessing +import threading +import uuid +import asyncio +from asyncio import Future, AbstractEventLoop + +from modelcache.embedding import EmbeddingModel +from modelcache.embedding.base import BaseEmbedding + + +def worker_func(embedding_model: EmbeddingModel, model_path, task_queue, result_queue, worker_id): + base_embedding = BaseEmbedding.get(embedding_model, model_path=model_path) + print(f"Embedding worker {worker_id} started.") + while True: + job_id, data = task_queue.get() + try: + result = base_embedding.to_embeddings(data) + except Exception as e: + result = e + result_queue.put((job_id, result)) + + +class EmbeddingDispatcher: + def __init__( + self, + embedding_model: EmbeddingModel, + model_path: str, + event_loop: AbstractEventLoop, + num_workers: int + ): + if num_workers <= 0: + raise ValueError("Number of workers must be greater than 0.") + + self.task_queue = multiprocessing.Queue() + self.result_queue = multiprocessing.Queue() + self.futures: dict[str, asyncio.Future] = {} + self.event_loop = event_loop + self._start_result_collector_thread() + + # Start worker processes + self.workers = [] + for i in range(num_workers): + p = multiprocessing.Process( + target=worker_func, + args=(embedding_model, model_path, self.task_queue, self.result_queue, i) + ) + p.daemon = True + p.start() + self.workers.append(p) + + def _start_result_collector_thread(self): + def collect(): + while True: + job_id, result = self.result_queue.get() + future = self.futures.pop(job_id, None) + if future: + self.event_loop.call_soon_threadsafe( + future.set_exception if isinstance(result, Exception) else future.set_result, + result + ) + + t = threading.Thread(target=collect, daemon=True) + t.start() + + def embed(self, data: str) -> Future: + job_id = str(uuid.uuid4()) + future = asyncio.get_running_loop().create_future() + self.futures[job_id] = future + self.task_queue.put((job_id, data)) + return future + diff --git a/modelcache/embedding/huggingface.py b/modelcache/embedding/huggingface.py index d48cb9d..77ca08f 100644 --- a/modelcache/embedding/huggingface.py +++ b/modelcache/embedding/huggingface.py @@ -4,7 +4,9 @@ class Huggingface(BaseEmbedding): def __init__(self, model: str): - self.model = SentenceTransformer(model) + self.model = SentenceTransformer(model,tokenizer_kwargs={ + "clean_up_tokenization_spaces":False + }) try: self.__dimension = self.model.config.hidden_size except Exception: diff --git a/requirements.txt b/requirements.txt index 19db268..05fd6db 100644 --- a/requirements.txt +++ b/requirements.txt @@ -12,12 +12,11 @@ transformers==4.44.2 faiss-cpu==1.7.4 redis==5.0.1 modelscope==1.26.0 -fastapi==0.115.5 -uvicorn==0.32.0 +fastapi==0.115.12 +uvicorn==0.34.3 chromadb==0.5.23 elasticsearch==7.10.0 snowflake-id==1.0.2 flagembedding==1.3.4 cryptography==45.0.2 sentence-transformers==4.1.0 -websockets==15.0.1 diff --git a/websocket4modelcache.py b/websocket4modelcache.py index 4541a91..fe99cd5 100644 --- a/websocket4modelcache.py +++ b/websocket4modelcache.py @@ -1,45 +1,58 @@ # -*- coding: utf-8 -*- -import 
asyncio -import websockets +from contextlib import asynccontextmanager +import uvicorn import json +import asyncio +from fastapi import FastAPI, WebSocket +from starlette.websockets import WebSocketDisconnect from modelcache.cache import Cache - -# Initialize the cache -cache = Cache.init("mysql", "milvus") - - -async def handle_client(websocket): - async for message in websocket: - # Parse JSON - try: - param_dict = json.loads(message) - except json.JSONDecodeError: - await websocket.send(json.dumps({"errorCode": 400, "errorDesc": "bad request"})) - continue - - request_id = param_dict.get("requestId") - request_payload = param_dict.get("payload") - if not request_id or not request_payload: - await websocket.send(json.dumps({"errorCode": 400, "errorDesc": "bad request"})) - continue - asyncio.create_task(process_and_respond(websocket, request_id, request_payload)) +from modelcache.embedding import EmbeddingModel + +@asynccontextmanager +async def lifespan(app: FastAPI): + global cache + cache, _ = await Cache.init( + sql_storage="mysql", + vector_storage="milvus", + embedding_model=EmbeddingModel.HUGGINGFACE_ALL_MPNET_BASE_V2, + embedding_workers_num=2 + ) + yield + +app = FastAPI(lifespan=lifespan) +cache: Cache = None + +@app.websocket("/modelcache") +async def user_backend(websocket: WebSocket): + await websocket.accept() + try: + while True: + data = await websocket.receive_text() + asyncio.create_task(handle_message(websocket, data)) + except WebSocketDisconnect as e: + print(e) -async def process_and_respond(websocket,request_id, request_payload): +async def handle_message(websocket,message): try: - result = cache.handle_request(request_payload) - await websocket.send(json.dumps({"requestId": request_id,"result": result})) + param_dict = json.loads(message) + except Exception: + await websocket.send_json({"errorCode": 400, "errorDesc": "bad request", "cacheHit": False, "delta_time": 0, "hit_query": '', "answer": ''}) + return + + request_id = param_dict.get("requestId") + request_payload = param_dict.get("payload") + if not request_id or not request_payload: + await websocket.send_json({"errorCode": 400, "errorDesc": "bad request", "cacheHit": False, "delta_time": 0, "hit_query": '', "answer": ''}) + return + try: + result = await cache.handle_request(request_payload) + await websocket.send_json({"requestId": request_id,"result": result}) except Exception as e: - error_result = {"errorCode": 102, "errorDesc": str(e), "cacheHit": False, "delta_time": 0, "hit_query": '', - "answer": ''} + error_result = {"errorCode": 500, "errorDesc": str(e), "cacheHit": False, "delta_time": 0, "hit_query": '', "answer": ''} cache.save_query_resp(error_result, model='', query='', delta_time=0) - await websocket.send(json.dumps(error_result)) - + await websocket.send_json(error_result) -async def main(): - print("WebSocket server starting on ws://0.0.0.0:5000") - async with websockets.serve(handle_client, "0.0.0.0", 5000): - await asyncio.Future() # Run forever -if __name__ == "__main__": - asyncio.run(main()) +if __name__ == '__main__': + uvicorn.run(app, host='0.0.0.0', port=5000, loop="asyncio", http="httptools") diff --git a/websocket4modelcache_demo.py b/websocket4modelcache_demo.py new file mode 100644 index 0000000..2a20bc8 --- /dev/null +++ b/websocket4modelcache_demo.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +from contextlib import asynccontextmanager +import uvicorn +import json +import asyncio +from fastapi import FastAPI, WebSocket +from starlette.websockets import WebSocketDisconnect 
+from modelcache.cache import Cache +from modelcache.embedding import EmbeddingModel + +@asynccontextmanager +async def lifespan(app: FastAPI): + global cache + cache, _ = await Cache.init( + sql_storage="sqlite", + vector_storage="faiss", + embedding_model=EmbeddingModel.HUGGINGFACE_ALL_MPNET_BASE_V2, + embedding_workers_num=2 + ) + yield + +app = FastAPI(lifespan=lifespan) +cache: Cache = None + +@app.websocket("/modelcache") +async def user_backend(websocket: WebSocket): + await websocket.accept() + try: + while True: + data = await websocket.receive_text() + asyncio.create_task(handle_message(websocket, data)) + except WebSocketDisconnect as e: + print(e) + + +async def handle_message(websocket,message): + try: + param_dict = json.loads(message) + except Exception: + await websocket.send_json({"errorCode": 400, "errorDesc": "bad request", "cacheHit": False, "delta_time": 0, "hit_query": '', "answer": ''}) + return + + request_id = param_dict.get("requestId") + request_payload = param_dict.get("payload") + if not request_id or not request_payload: + await websocket.send_json({"errorCode": 400, "errorDesc": "bad request", "cacheHit": False, "delta_time": 0, "hit_query": '', "answer": ''}) + return + try: + result = await cache.handle_request(request_payload) + await websocket.send_json({"requestId": request_id,"result": result}) + except Exception as e: + error_result = {"errorCode": 500, "errorDesc": str(e), "cacheHit": False, "delta_time": 0, "hit_query": '', "answer": ''} + cache.save_query_resp(error_result, model='', query='', delta_time=0) + await websocket.send_json(error_result) + + +if __name__ == '__main__': + uvicorn.run(app, host='0.0.0.0', port=5000, loop="asyncio", http="httptools") From 512c5b151df039208dd4b38d30181ff3e5690b89 Mon Sep 17 00:00:00 2001 From: adiaybgu Date: Thu, 12 Jun 2025 14:29:04 +0300 Subject: [PATCH 93/98] Unit Tests for wtinylfu_cache.py and arc_cache.py Co-authored-by: omerdor001 Co-authored-by: adiaybgu --- modelcache/manager/eviction/wtinylfu_cache.py | 12 +- requirements.txt | 2 + tests/__init__.py | 0 tests/test_arc_cache.py | 208 +++++++++++++++ tests/test_wtinylfu_cache.py | 237 ++++++++++++++++++ 5 files changed, 457 insertions(+), 2 deletions(-) create mode 100644 tests/__init__.py create mode 100644 tests/test_arc_cache.py create mode 100644 tests/test_wtinylfu_cache.py diff --git a/modelcache/manager/eviction/wtinylfu_cache.py b/modelcache/manager/eviction/wtinylfu_cache.py index d7f4272..f33a572 100644 --- a/modelcache/manager/eviction/wtinylfu_cache.py +++ b/modelcache/manager/eviction/wtinylfu_cache.py @@ -117,13 +117,21 @@ def _put(self, key): def _admit_to_main(self, key): if key in self.protected or key in self.probation: return + if self.probation_size == 0: + if self.on_evict: + self.on_evict(key) + self.data.pop(key, None) + return if len(self.probation) < self.probation_size: self.probation[key] = True - else: + elif self.probation: evicted = next(iter(self.probation)) self.probation.pop(evicted) self.probation[key] = True - # this eviction removes it entirely if self.on_evict: self.on_evict(evicted) self.data.pop(evicted, None) + else: + if self.on_evict: + self.on_evict(key) + self.data.pop(key, None) diff --git a/requirements.txt b/requirements.txt index 05fd6db..23ee700 100644 --- a/requirements.txt +++ b/requirements.txt @@ -20,3 +20,5 @@ snowflake-id==1.0.2 flagembedding==1.3.4 cryptography==45.0.2 sentence-transformers==4.1.0 +pytest>=8.0 + diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 
0000000..e69de29 diff --git a/tests/test_arc_cache.py b/tests/test_arc_cache.py new file mode 100644 index 0000000..7958409 --- /dev/null +++ b/tests/test_arc_cache.py @@ -0,0 +1,208 @@ +import pytest +from modelcache.manager.eviction.arc_cache import ARC + +@pytest.fixture() +def empty_arc(): + return ARC(maxsize=4) + +@pytest.fixture() +def arc_with_data(): + c = ARC(maxsize=4) + c['a'] = 1 + c['b'] = 2 + return c + +def test_setitem_adds_to_arc(empty_arc): + """Test __setitem__ adds a key-value pair to ARC.""" + empty_arc['x'] = 123 + assert 'x' in empty_arc + assert empty_arc['x'] == 123 + +def test_setitem_overwrites_value(empty_arc): + """Test that __setitem__ overwrites existing value in ARC.""" + empty_arc['y'] = 1 + empty_arc['y'] = 55 + assert empty_arc['y'] == 55 + +def test_getitem_returns_value(arc_with_data): + """Test __getitem__ returns the correct value if present.""" + assert arc_with_data['a'] == 1 + +def test_getitem_raises_keyerror_on_missing(empty_arc): + """Test __getitem__ raises KeyError if key is missing.""" + with pytest.raises(KeyError): + _ = empty_arc['nope'] + +def test_contains_true_for_present(arc_with_data): + """Test __contains__ returns True for a present key.""" + assert 'b' in arc_with_data + +def test_contains_false_for_missing(arc_with_data): + """Test __contains__ returns False for missing key.""" + assert 'notfound' not in arc_with_data + +def test_len_reports_active_cache_size(arc_with_data): + """Test __len__ reports only active items (T1 + T2).""" + assert len(arc_with_data) == 2 + arc_with_data['c'] = 3 + assert len(arc_with_data) == 3 + +def test_pop_removes_key_from_one_list(arc_with_data): + """Test pop removes key from the first ARC list where it is found.""" + arc_with_data['ghost'] = 9 + arc_with_data.b1['ghost'] = 9 + arc_with_data.pop('ghost') + assert 'ghost' not in arc_with_data.t1 + assert 'ghost' in arc_with_data.b1 + +def test_clear_removes_all_keys(empty_arc): + """Test clear() empties all lists and resets p.""" + empty_arc['a'] = 1 + empty_arc['b'] = 2 + empty_arc.clear() + assert len(empty_arc.t1) == 0 + assert len(empty_arc.t2) == 0 + assert len(empty_arc.b1) == 0 + assert len(empty_arc.b2) == 0 + assert empty_arc.p == 0 + +def test_evict_internal_evicts_when_over_capacity(): + """Test _evict_internal evicts oldest when ARC is full.""" + evicted = [] + c = ARC(maxsize=2, on_evict=lambda keys: evicted.extend(keys)) + c['a'] = 1 + c['b'] = 2 + c['c'] = 3 + assert len(c) == 2 + assert len(evicted) >= 1 + for k in evicted: + assert k in ['a', 'b'] + +def test_eviction_callback_is_called(): + """Test on_evict callback is called on eviction.""" + evicted = [] + c = ARC(maxsize=2, on_evict=lambda keys: evicted.extend(keys)) + c['x'] = 1 + c['y'] = 2 + c['z'] = 3 + assert len(evicted) > 0 + assert all(isinstance(k, str) for k in evicted) + +def test_promote_from_t1_to_t2(empty_arc): + """Test that accessing key in T1 promotes it to T2.""" + empty_arc['foo'] = 10 + assert 'foo' in empty_arc.t1 + _ = empty_arc['foo'] + assert 'foo' in empty_arc.t2 + +def test_refresh_in_t2_updates_order(empty_arc): + """Test that repeated access in T2 keeps item in T2.""" + empty_arc['x'] = 1 + _ = empty_arc['x'] # promote to T2 + empty_arc['y'] = 2 + _ = empty_arc['x'] # refresh x in T2 + assert 'x' in empty_arc.t2 + assert empty_arc.t2.popitem(last=True)[0] == 'x' + +def test_hit_in_ghost_lists_promotes_to_t2(empty_arc): + """Test access in ghost list B1 promotes key to T2.""" + empty_arc['a'] = 1 + empty_arc['b'] = 2 + empty_arc['c'] = 3 # triggers 
eviction to ghost B1 + if empty_arc.b1: + ghost_key = next(iter(empty_arc.b1)) + empty_arc.__missing__ = lambda key: 999 + _ = empty_arc[ghost_key] + assert ghost_key in empty_arc.t2 + +def test_iter_lists_keys_in_order(arc_with_data): + """Test __iter__ yields keys from T1 and then T2.""" + arc_with_data['c'] = 3 + keys = list(iter(arc_with_data)) + expected = list(arc_with_data.t1.keys()) + list(arc_with_data.t2.keys()) + assert keys == expected + +def test_repr_outputs_status(empty_arc): + """Test __repr__ returns a string with cache stats.""" + r = repr(empty_arc) + assert r.startswith("ARC(") + assert "maxsize" in r + +# ----------- Additional/Edge case tests ----------- + +def test_pop_missing_key_returns_default(empty_arc): + """Test that pop() returns default if key not found in any list.""" + assert empty_arc.pop('missing', default='sentinel') == 'sentinel' + +def test_setitem_multiple_evictions(): + """Test multiple evictions in sequence do not corrupt the cache.""" + evicted = [] + c = ARC(maxsize=2, on_evict=lambda keys: evicted.extend(keys)) + c['a'] = 1 + c['b'] = 2 + c['c'] = 3 + c['d'] = 4 + assert len(c) == 2 + assert len(evicted) >= 2 + assert all(isinstance(k, str) for k in evicted) + +def test_access_promotes_b1_and_b2(empty_arc): + """Test accessing a key in B1 or B2 increases/decreases p appropriately.""" + empty_arc['a'] = 1 + empty_arc['b'] = 2 + empty_arc['c'] = 3 + if empty_arc.b1: + ghost_key = next(iter(empty_arc.b1)) + empty_arc.__missing__ = lambda key: 555 + p_before = empty_arc.p + _ = empty_arc[ghost_key] + assert empty_arc.p > p_before or empty_arc.p == empty_arc.maxsize + +def test_active_and_ghost_lists_dont_exceed_maxsize(): + """Test ghost lists (B1/B2) and active lists (T1/T2) don't exceed maxsize.""" + c = ARC(maxsize=3) + for k in ['a', 'b', 'c', 'd', 'e']: + c[k] = ord(k) + # B1 + B2 should never be larger than maxsize + assert len(c.b1) + len(c.b2) <= c.maxsize + assert len(c.t1) + len(c.t2) <= c.maxsize + +def test_clear_resets_all_lists_and_p(empty_arc): + """Test that clear() resets all lists and p after many ops.""" + for k in 'abcd': + empty_arc[k] = ord(k) + empty_arc.clear() + assert not any([empty_arc.t1, empty_arc.t2, empty_arc.b1, empty_arc.b2]) + assert empty_arc.p == 0 + +def test_repr_is_informative(empty_arc): + """Test that __repr__ outputs all important stats.""" + empty_arc['q'] = 9 + r = repr(empty_arc) + assert "t1_len" in r and "b1_len" in r and "p=" in r + +def test_setitem_duplicate_key_resets_position(empty_arc): + """Test that setting the same key again resets its position.""" + empty_arc['x'] = 10 + empty_arc['y'] = 11 + empty_arc['x'] = 99 + # x should be last in t1 + assert list(empty_arc.t1.keys())[-1] == 'x' + assert empty_arc['x'] == 99 + +def test_eviction_of_promoted_key(): + """Test that a key promoted to T2 can still be evicted if capacity is exceeded.""" + evicted = [] + c = ARC(maxsize=2, on_evict=lambda keys: evicted.extend(keys)) + c['a'] = 1 + c['b'] = 2 + _ = c['a'] # Promote 'a' to T2 + c['c'] = 3 # Evict one of the keys + assert len(c) == 2 + assert len(evicted) >= 1 + +def test_keyerror_on_missing_and_no_default(empty_arc): + """Test pop() raises KeyError if no default is given and key is missing.""" + with pytest.raises(KeyError): + empty_arc.pop('never-there') + diff --git a/tests/test_wtinylfu_cache.py b/tests/test_wtinylfu_cache.py new file mode 100644 index 0000000..21a25d1 --- /dev/null +++ b/tests/test_wtinylfu_cache.py @@ -0,0 +1,237 @@ +import pytest +from 
modelcache.manager.eviction.wtinylfu_cache import W2TinyLFU, CountMinSketch + +@pytest.fixture() +def empty_cache(): + return W2TinyLFU(maxsize=10, window_pct=50) + +@pytest.fixture() +def cache_with_data(): + c = W2TinyLFU(maxsize=10, window_pct=50) + c['a'] = 1 + c['b'] = 2 + return c + +@pytest.fixture() +def cms(): + return CountMinSketch(width=16, depth=2, decay_interval=5) + +def test_setitem_adds_to_cache(empty_cache): + """Test that __setitem__ adds a new key-value pair to the cache.""" + empty_cache['x'] = 100 + assert empty_cache.data['x'] == 100 + assert 'x' in empty_cache + +def test_setitem_overwrites_value(empty_cache): + """Test that __setitem__ overwrites existing value for the same key.""" + empty_cache['y'] = 1 + empty_cache['y'] = 999 + assert empty_cache.data['y'] == 999 + +def test_getitem_returns_value(cache_with_data): + """Test __getitem__ returns correct value for present key.""" + assert cache_with_data['a'] == 1 + +def test_getitem_raises_keyerror_on_missing(empty_cache): + """Test __getitem__ raises KeyError for missing key.""" + with pytest.raises(KeyError): + _ = empty_cache['not_found'] + +def test_get_method_returns_value(cache_with_data): + """Test get method returns value for existing key.""" + assert cache_with_data.get('b') == 2 + +def test_get_method_returns_default_on_missing(empty_cache): + """Test get method returns default for missing key.""" + assert empty_cache.get('zzz', default=123) == 123 + +def test_contains_true_for_present(cache_with_data): + """Test __contains__ returns True for present key.""" + assert 'a' in cache_with_data + +def test_contains_false_for_missing(cache_with_data): + """Test __contains__ returns False for missing key.""" + assert 'zzz' not in cache_with_data + +def test_delitem_removes_key(cache_with_data): + """Test __delitem__ removes a present key.""" + del cache_with_data['a'] + assert 'a' not in cache_with_data + +def test_delitem_safe_if_missing(empty_cache): + """Test __delitem__ does not raise if key is missing.""" + try: + del empty_cache['nope'] + except Exception: + pytest.fail("delitem should not raise when key is missing.") + +def test_duplicate_key_updates_value(empty_cache): + """Test that setting same key twice updates the value.""" + empty_cache['dup'] = 10 + empty_cache['dup'] = 20 + assert empty_cache['dup'] == 20 + +def test_window_size_clamped_at_least_one(): + """Test that window size is always at least 1.""" + c = W2TinyLFU(maxsize=3, window_pct=0) + assert c.window_size >= 1 + c['a'] = 1 + assert len(c.window) <= c.window_size + +def test_eviction_callback_at_least_one_eviction(): + """Test that eviction callback is called when capacity exceeded.""" + evicted = [] + c = W2TinyLFU(maxsize=3, window_pct=33, on_evict=evicted.append) + c['a'] = 1 + c['b'] = 2 + c['c'] = 3 + c['d'] = 4 + assert len(evicted) >= 1 + assert all(k in ('a', 'b', 'c', 'd') for k in evicted) + +def test_eviction_callback_evicted_keys_are_gone(): + """Test that evicted keys are not present in the cache.""" + evicted = [] + c = W2TinyLFU(maxsize=3, window_pct=33, on_evict=evicted.append) + keys = ['a', 'b', 'c', 'd'] + for i, k in enumerate(keys): + c[k] = i + for k in evicted: + assert k not in c + +def test_eviction_callback_not_called_when_under_capacity(): + """Test that eviction callback is not called when under capacity.""" + evicted = [] + c = W2TinyLFU(maxsize=4, window_pct=50, on_evict=evicted.append) + c['one'] = 1 + c['two'] = 2 + assert len(evicted) == 0 + +def test_eviction_callback_with_chain_eviction(): + """Test 
that multiple evictions can occur in chain scenarios.""" + evicted = [] + c = W2TinyLFU(maxsize=2, window_pct=50, on_evict=evicted.append) + c['x'] = 1 + c['y'] = 2 + c['z'] = 3 + c['w'] = 4 + assert set(evicted).issubset({'x', 'y', 'z', 'w'}) + assert len(evicted) >= 1 + +def test_evicted_key_not_accessible(): + """Test that accessing an evicted key raises KeyError.""" + c = W2TinyLFU(maxsize=2) + c['a'], c['b'] = 1, 2 + c['c'] = 3 + with pytest.raises(KeyError): + _ = c['a'] + +def test_eviction_until_empty_and_reusability(): + """Test cache remains usable after all items have been evicted.""" + c = W2TinyLFU(maxsize=2) + c['x'] = 1 + c['y'] = 2 + c['z'] = 3 + del c['y'] + c['w'] = 4 + assert len(c.data) <= 2 + +def test_high_frequency_key_survives_eviction(): + """Test that a frequently accessed key survives eviction pressure.""" + c = W2TinyLFU(maxsize=10, window_pct=40) + c['a'] = 1 + for new_key in ['b', 'c', 'd', 'e', 'f', 'g']: + c.get('a') + c[new_key] = ord(new_key) + assert 'a' in c + +def test_low_freq_key_evicted(): + """Test that an infrequently used key is evicted under pressure.""" + c = W2TinyLFU(maxsize=3, window_pct=33) + c['a'] = 1 + c['b'] = 2 + c['c'] = 3 + c['d'] = 4 + assert 'a' not in c + +@pytest.mark.parametrize("hits, expected_in_cache", [ + (0, False), + (5, True), + (15, True), +]) +def test_access_promotes_key(hits, expected_in_cache): + """Test that frequent access promotes key to survive eviction.""" + c = W2TinyLFU(maxsize=8, window_pct=40) + c['a'] = 1 + insert_keys = ['b', 'c', 'd', 'e', 'f', 'g', 'h'] + for i, k in enumerate(insert_keys): + if i < hits: + c.get('a') + c[k] = ord(k) + if hits == 0: + assert 'a' not in c + else: + assert 'a' in c + +def test_cms_estimate_increases_and_decays(cms): + """Test that CountMinSketch increases and decays correctly.""" + before = cms.estimate('test') + cms.add('test') + after_add = cms.estimate('test') + assert after_add >= before + cms.decay() + after_decay = cms.estimate('test') + assert after_decay <= after_add + +def test_admit_to_main_adds_to_probation(): + """Test _admit_to_main adds new key to probation segment.""" + c = W2TinyLFU(maxsize=10, window_pct=50) + key = 'k' + assert key not in c.probation + assert key not in c.protected + c._admit_to_main(key) + assert key in c.probation + +def test_admit_to_main_evicts_when_probation_full(): + """Test _admit_to_main evicts LRU key when probation is full.""" + evicted = [] + c = W2TinyLFU(maxsize=4, window_pct=25, on_evict=evicted.append) + for i in range(c.probation_size): + c._admit_to_main(f'p{i}') + c.data[f'p{i}'] = i + extra = 'extra' + c.data[extra] = 999 + c._admit_to_main(extra) + assert len(c.probation) == c.probation_size + assert extra in c.probation + assert len(evicted) >= 1 + +def test_admit_to_main_noop_if_already_present(): + """Test _admit_to_main does nothing if key already present.""" + c = W2TinyLFU(maxsize=10, window_pct=50) + key = 'present' + c.probation[key] = True + c._admit_to_main(key) + assert key in c.probation + c.protected[key] = True + c._admit_to_main(key) + assert key in c.protected + +def test_put_key_in_window_and_main(): + """Test that new keys are distributed into segments correctly.""" + c = W2TinyLFU(maxsize=10, window_pct=50) + c['a'] = 1 + for k in ['b', 'c', 'd']: + c[k] = ord(k) + c['e'] = 5 + total = len(c.window) + len(c.probation) + len(c.protected) + assert total <= c.maxsize + +def test_put_eviction_callback_called(): + """Test that the eviction callback is invoked when needed.""" + evicted = [] + c = 
W2TinyLFU(maxsize=2, on_evict=evicted.append) + c['a'] = 1 + c['b'] = 2 + c['c'] = 3 + assert len(evicted) >= 1 From c37875c6263ccdf7a0275eaa3f5d6ee6bc833412 Mon Sep 17 00:00:00 2001 From: adiaybgu Date: Sun, 15 Jun 2025 16:45:47 +0300 Subject: [PATCH 94/98] Adding locks to ARC and WTINYLFU caches and adding concurrency unit tests Co-authored-by: omerdor001 Co-authored-by: adiaybgu --- modelcache/cache.py | 6 +- modelcache/manager/data_manager.py | 28 +- modelcache/manager/eviction/arc_cache.py | 143 ++--- modelcache/manager/eviction/memory_cache.py | 13 +- modelcache/manager/eviction/wtinylfu_cache.py | 42 +- requirements.txt | 2 +- tests/test_arc_cache.py | 560 ++++++++++++++---- tests/test_wtinylfu_cache.py | 283 +++++++-- websocket4modelcache.py | 2 +- 9 files changed, 772 insertions(+), 307 deletions(-) diff --git a/modelcache/cache.py b/modelcache/cache.py index 16c744d..76d98fc 100644 --- a/modelcache/cache.py +++ b/modelcache/cache.py @@ -27,7 +27,7 @@ #==================== Cache class definition =========================# #=====================================================================# -executor = ThreadPoolExecutor(max_workers=6) +executor = ThreadPoolExecutor(max_workers=2) def response_text(cache_resp): return cache_resp['data'] @@ -300,8 +300,8 @@ async def init( config=vector_config, metric_type=similarity_metric_type, ), - eviction='WTINYLFU', - max_size=100000, + eviction='ARC', + max_size=10000, normalize=normalize, ) diff --git a/modelcache/manager/data_manager.py b/modelcache/manager/data_manager.py index 13d29b1..c12f414 100644 --- a/modelcache/manager/data_manager.py +++ b/modelcache/manager/data_manager.py @@ -182,8 +182,7 @@ def __init__( self.eviction_base = MemoryCacheEviction( policy=policy, maxsize=max_size, - clean_size=clean_size, - on_evict=self._evict_ids) + clean_size=clean_size) def save(self, questions: List[any], answers: List[any], embedding_datas: List[any], **kwargs): model = kwargs.pop("model", None) @@ -314,31 +313,6 @@ def truncate(self, model): 'ScalarDB': 'truncate scalar data failed, please check! e: {}'.format(e)} return {'status': 'success', 'VectorDB': 'rebuild', 'ScalarDB': 'delete_count: ' + str(delete_count)} - # added - def _evict_ids(self, ids, **kwargs): - model = kwargs.get("model") - if not ids or any(i is None for i in ids): - modelcache_log.warning("Skipping eviction for invalid IDs: %s", ids) - return - - if isinstance(ids,str): - ids = [ids] - - for _id in ids: - self.eviction_base.get_cache(model).pop(_id, None) - - try: - self.s.mark_deleted(ids) - modelcache_log.info("Evicted from scalar storage: %s", ids) - except Exception as e: - modelcache_log.error("Failed to delete from scalar storage: %s", str(e)) - - try: - self.v.delete(ids, model=model) - modelcache_log.info("Evicted from vector storage (model=%s): %s", model, ids) - except Exception as e: - modelcache_log.error("Failed to delete from vector storage (model=%s): %s", model, str(e)) - def flush(self): self.s.flush() self.v.flush() diff --git a/modelcache/manager/eviction/arc_cache.py b/modelcache/manager/eviction/arc_cache.py index 509fae0..ae3a260 100644 --- a/modelcache/manager/eviction/arc_cache.py +++ b/modelcache/manager/eviction/arc_cache.py @@ -1,27 +1,18 @@ from cachetools import Cache from collections import OrderedDict +from readerwriterlock import rwlock -class ARC(Cache): - """ - Adaptive Replacement Cache (ARC) implementation with on_evict callback. - Balances recency and frequency via two active lists (T1, T2) and two ghost lists (B1, B2). 
- Calls on_evict([key]) whenever an item is evicted from the active cache. - """ +_sentinel = object() - def __init__(self, maxsize, getsizeof=None, on_evict=None): - """ - Args: - maxsize (int): Maximum cache size. - getsizeof (callable, optional): Sizing function for items. - on_evict (callable, optional): Callback called as on_evict([key]) when a key is evicted. - """ +class ARC(Cache): + def __init__(self, maxsize, getsizeof=None): super().__init__(maxsize, getsizeof) self.t1 = OrderedDict() self.t2 = OrderedDict() self.b1 = OrderedDict() self.b2 = OrderedDict() - self.p = 0 # Adaptive target for T1 size. - self.on_evict = on_evict + self.p = 0 + self._rw_lock = rwlock.RWLockWrite() def __len__(self): return len(self.t1) + len(self.t2) @@ -30,96 +21,80 @@ def __contains__(self, key): return key in self.t1 or key in self.t2 def _evict_internal(self): - """ - Evicts items from T1 or T2 if cache is over capacity, and prunes ghost lists. - Calls on_evict for each evicted key. - """ - # Evict from T1 or T2 if active cache > maxsize while len(self.t1) + len(self.t2) > self.maxsize: if len(self.t1) > self.p or (len(self.t1) == 0 and len(self.t2) > 0): key, value = self.t1.popitem(last=False) self.b1[key] = value - if self.on_evict: - self.on_evict([key]) else: key, value = self.t2.popitem(last=False) self.b2[key] = value - if self.on_evict: - self.on_evict([key]) - # Prune ghost lists to their max lengths while len(self.b1) > (self.maxsize - self.p): self.b1.popitem(last=False) while len(self.b2) > self.p: self.b2.popitem(last=False) def __setitem__(self, key, value): - # Remove from all lists before re-inserting - for l in (self.t1, self.t2, self.b1, self.b2): - l.pop(key, None) - self.t1[key] = value - self.t1.move_to_end(key) - self._evict_internal() + with self._rw_lock.gen_wlock(): + for l in (self.t1, self.t2, self.b1, self.b2): + l.pop(key, None) + self.t1[key] = value + self.t1.move_to_end(key) + self._evict_internal() def __getitem__(self, key): - # Case 1: Hit in T1 → promote to T2 - if key in self.t1: - value = self.t1.pop(key) - self.t2[key] = value - self.t2.move_to_end(key) - self.p = max(0, self.p - 1) - self._evict_internal() - return value - # Case 2: Hit in T2 → refresh in T2 - if key in self.t2: - value = self.t2.pop(key) - self.t2[key] = value - self.t2.move_to_end(key) - self.p = min(self.maxsize, self.p + 1) - self._evict_internal() - return value - # Case 3: Hit in B1 (ghost) → fetch and promote to T2 - if key in self.b1: - self.b1.pop(key) - self.p = min(self.maxsize, self.p + 1) - self._evict_internal() - value = super().__missing__(key) - self.t2[key] = value - self.t2.move_to_end(key) - return value - # Case 4: Hit in B2 (ghost) → fetch and promote to T2 - if key in self.b2: - self.b2.pop(key) - self.p = max(0, self.p - 1) - self._evict_internal() - value = super().__missing__(key) - self.t2[key] = value - self.t2.move_to_end(key) - return value - # Case 5: Cold miss → handled by Cache base class (calls __setitem__ after __missing__) - return super().__getitem__(key) + with self._rw_lock.gen_wlock(): + if key in self.t1: + value = self.t1.pop(key) + self.t2[key] = value + self.t2.move_to_end(key) + self.p = max(0, self.p - 1) + self._evict_internal() + return value + if key in self.t2: + value = self.t2.pop(key) + self.t2[key] = value + self.t2.move_to_end(key) + self.p = min(self.maxsize, self.p + 1) + self._evict_internal() + return value + if key in self.b1: + self.b1.pop(key) + self.p = min(self.maxsize, self.p + 1) + self._evict_internal() + value = 
super().__missing__(key) + self.t2[key] = value + self.t2.move_to_end(key) + return value + if key in self.b2: + self.b2.pop(key) + self.p = max(0, self.p - 1) + self._evict_internal() + value = super().__missing__(key) + self.t2[key] = value + self.t2.move_to_end(key) + return value + return super().__getitem__(key) def __missing__(self, key): - """ - Override this in a subclass, or rely on direct assignment (cache[key] = value). - """ raise KeyError(key) - def pop(self, key, default=None): - """ - Remove key from all lists. - """ - for l in (self.t1, self.t2, self.b1, self.b2): - if key in l: - return l.pop(key) - return default + def pop(self, key, default=_sentinel): + with self._rw_lock.gen_wlock(): + for l in (self.t1, self.t2, self.b1, self.b2): + if key in l: + return l.pop(key) + if default is _sentinel: + raise KeyError(key) + return default def clear(self): - self.t1.clear() - self.t2.clear() - self.b1.clear() - self.b2.clear() - self.p = 0 - super().clear() + with self._rw_lock.gen_wlock(): + self.t1.clear() + self.t2.clear() + self.b1.clear() + self.b2.clear() + self.p = 0 + super().clear() def __iter__(self): yield from self.t1 diff --git a/modelcache/manager/eviction/memory_cache.py b/modelcache/manager/eviction/memory_cache.py index 8561075..347abf9 100644 --- a/modelcache/manager/eviction/memory_cache.py +++ b/modelcache/manager/eviction/memory_cache.py @@ -19,17 +19,14 @@ def wrapper(*args, **kwargs): class MemoryCacheEviction(EvictionBase): - def __init__(self, policy: str, maxsize: int, clean_size: int, on_evict: Callable[[List[Any]], None], **kwargs): + def __init__(self, policy: str, maxsize: int, clean_size: int, **kwargs): self._policy = policy.upper() self.model_to_cache = dict() self.maxsize = maxsize self.clean_size = clean_size - self.on_evict = on_evict self.kwargs = kwargs def create_cache(self, model: str): - - ################# Not integrated with on_evict yet ####################### if self._policy == "LRU": cache = cachetools.LRUCache(maxsize=self.maxsize, **self.kwargs) elif self._policy == "LFU": @@ -38,18 +35,14 @@ def create_cache(self, model: str): cache = cachetools.FIFOCache(maxsize=self.maxsize, **self.kwargs) elif self._policy == "RR": cache = cachetools.RRCache(maxsize=self.maxsize, **self.kwargs) - ########################################################################### - elif self._policy == "WTINYLFU": - cache = W2TinyLFU(maxsize=self.maxsize, on_evict=lambda x: self.on_evict(x,model=model)) + cache = W2TinyLFU(maxsize=self.maxsize) elif self._policy == "ARC": - cache = ARC(maxsize=self.maxsize, on_evict=lambda x: self.on_evict(x,model=model)) + cache = ARC(maxsize=self.maxsize) else: raise ValueError(f"Unknown policy {self.policy}") - cache.popitem = popitem_wrapper(cache.popitem, self.on_evict, self.clean_size) return cache - def put(self, objs: List[Tuple[Any, Any]], model: str): cache = self.get_cache(model) for key, value in objs: diff --git a/modelcache/manager/eviction/wtinylfu_cache.py b/modelcache/manager/eviction/wtinylfu_cache.py index f33a572..d5887f5 100644 --- a/modelcache/manager/eviction/wtinylfu_cache.py +++ b/modelcache/manager/eviction/wtinylfu_cache.py @@ -1,6 +1,6 @@ from cachetools import LRUCache, Cache +from readerwriterlock import rwlock import random -from typing import Any, Callable class CountMinSketch: def __init__(self, width=1024, depth=4, decay_interval=10000): @@ -16,7 +16,6 @@ def _hash(self, x, seed): def add(self, x): self.ops += 1 - # minimal increment est = self.estimate(x) for i, seed in 
enumerate(self.seeds): idx = self._hash(x, seed) @@ -35,9 +34,8 @@ def decay(self): for i in range(len(table)): table[i] >>= 1 - class W2TinyLFU(Cache): - def __init__(self, maxsize, window_pct=1, on_evict: Callable[[Any], None]=None): + def __init__(self, maxsize, window_pct=1): super().__init__(maxsize) self.window_size = max(1, int(maxsize * window_pct / 100)) rest = maxsize - self.window_size @@ -49,12 +47,13 @@ def __init__(self, maxsize, window_pct=1, on_evict: Callable[[Any], None]=None): self.protected = LRUCache(maxsize=self.protected_size) self.cms = CountMinSketch() - self.on_evict = on_evict self.data = {} + self._rw_lock = rwlock.RWLockWrite() def __setitem__(self, key, value): - self.data[key] = value - self._put(key) + with self._rw_lock.gen_wlock(): + self.data[key] = value + self._put(key) def __getitem__(self, key): val = self.get(key, default=None) @@ -66,10 +65,11 @@ def __contains__(self, key): return key in self.window or key in self.probation or key in self.protected def __delitem__(self, key): - self.data.pop(key, None) - self.window.pop(key, None) - self.probation.pop(key, None) - self.protected.pop(key, None) + with self._rw_lock.gen_wlock(): + self.data.pop(key, None) + self.window.pop(key, None) + self.probation.pop(key, None) + self.protected.pop(key, None) def get(self, key, default=None): if key in self.window: @@ -80,7 +80,6 @@ def get(self, key, default=None): return self.data.get(key, default) if key in self.probation: self.probation.pop(key) - # demote LRU from protected if full if len(self.protected) >= self.protected_size: demoted = next(iter(self.protected)) self.protected.pop(demoted) @@ -94,12 +93,10 @@ def _put(self, key): if key in self: return - # admission to window if len(self.window) < self.window_size: self.window[key] = True return - # window full: victim is LRU victim = next(iter(self.window)) self.window.pop(victim) @@ -107,19 +104,13 @@ def _put(self, key): self._admit_to_main(victim) self._admit_to_main(key) else: - # victim stronger or equal: victim enters main, key is dropped self._admit_to_main(victim) - # actually evicts key entirely - if self.on_evict: - self.on_evict(key) self.data.pop(key, None) def _admit_to_main(self, key): if key in self.protected or key in self.probation: return if self.probation_size == 0: - if self.on_evict: - self.on_evict(key) self.data.pop(key, None) return if len(self.probation) < self.probation_size: @@ -128,10 +119,13 @@ def _admit_to_main(self, key): evicted = next(iter(self.probation)) self.probation.pop(evicted) self.probation[key] = True - if self.on_evict: - self.on_evict(evicted) self.data.pop(evicted, None) else: - if self.on_evict: - self.on_evict(key) self.data.pop(key, None) + + def clear(self): + with self._rw_lock.gen_wlock(): + self.window.clear() + self.probation.clear() + self.protected.clear() + self.data.clear() \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 23ee700..2db7121 100644 --- a/requirements.txt +++ b/requirements.txt @@ -21,4 +21,4 @@ flagembedding==1.3.4 cryptography==45.0.2 sentence-transformers==4.1.0 pytest>=8.0 - +readerwriterlock==1.0.9 diff --git a/tests/test_arc_cache.py b/tests/test_arc_cache.py index 7958409..729cb86 100644 --- a/tests/test_arc_cache.py +++ b/tests/test_arc_cache.py @@ -1,19 +1,27 @@ import pytest +import threading +import time from modelcache.manager.eviction.arc_cache import ARC +# ----------- Fixtures ----------- + @pytest.fixture() def empty_arc(): + # Create an empty ARC cache for tests return ARC(maxsize=4) 
@pytest.fixture() def arc_with_data(): + # Create an ARC cache pre-filled with two items c = ARC(maxsize=4) c['a'] = 1 c['b'] = 2 return c +# ----------- Basic Functionality Tests ----------- + def test_setitem_adds_to_arc(empty_arc): - """Test __setitem__ adds a key-value pair to ARC.""" + """Test that __setitem__ adds a key-value pair to ARC.""" empty_arc['x'] = 123 assert 'x' in empty_arc assert empty_arc['x'] == 123 @@ -24,39 +32,80 @@ def test_setitem_overwrites_value(empty_arc): empty_arc['y'] = 55 assert empty_arc['y'] == 55 +def test_setitem_none_value(empty_arc): + """Test that __setitem__ can store None as a value.""" + empty_arc['foo'] = None + assert empty_arc['foo'] is None + +def test_duplicate_key_updates_value(empty_arc): + """Test that setting the same key twice updates the value.""" + empty_arc['dup'] = 10 + empty_arc['dup'] = 20 + assert empty_arc['dup'] == 20 + def test_getitem_returns_value(arc_with_data): - """Test __getitem__ returns the correct value if present.""" + """Test that __getitem__ returns the correct value if present.""" assert arc_with_data['a'] == 1 def test_getitem_raises_keyerror_on_missing(empty_arc): - """Test __getitem__ raises KeyError if key is missing.""" + """Test that __getitem__ raises KeyError if key is missing.""" with pytest.raises(KeyError): _ = empty_arc['nope'] def test_contains_true_for_present(arc_with_data): - """Test __contains__ returns True for a present key.""" + """Test that __contains__ returns True for a present key.""" assert 'b' in arc_with_data def test_contains_false_for_missing(arc_with_data): - """Test __contains__ returns False for missing key.""" + """Test that __contains__ returns False for missing key.""" assert 'notfound' not in arc_with_data def test_len_reports_active_cache_size(arc_with_data): - """Test __len__ reports only active items (T1 + T2).""" + """Test that __len__ reports only active items (T1 + T2).""" assert len(arc_with_data) == 2 arc_with_data['c'] = 3 assert len(arc_with_data) == 3 def test_pop_removes_key_from_one_list(arc_with_data): - """Test pop removes key from the first ARC list where it is found.""" + """Test that pop removes key from the first ARC list where it is found.""" arc_with_data['ghost'] = 9 - arc_with_data.b1['ghost'] = 9 + with arc_with_data._rw_lock.gen_wlock(): + arc_with_data.b1['ghost'] = 9 # Simulate ghost in B1 (protected by write lock) arc_with_data.pop('ghost') - assert 'ghost' not in arc_with_data.t1 - assert 'ghost' in arc_with_data.b1 + with arc_with_data._rw_lock.gen_rlock(): + assert 'ghost' not in arc_with_data.t1 + assert 'ghost' in arc_with_data.b1 + +def test_pop_missing_key_returns_default(empty_arc): + """Test that pop() returns default if key not found in any list.""" + assert empty_arc.pop('missing', default='sentinel') == 'sentinel' + +def test_pop_evicted_key_returns_default(empty_arc): + """Test that pop() on a recently evicted key returns default.""" + empty_arc['a'] = 1 + empty_arc['b'] = 2 + empty_arc['c'] = 3 + empty_arc['d'] = 4 + empty_arc['e'] = 5 + for key in ['a', 'b', 'c', 'd']: + try: + empty_arc.pop(key) + except KeyError: + pass + except Exception: + pytest.fail("Unexpected exception on pop after eviction.") + +def test_pop_with_none_default(empty_arc): + """Test pop returns None when default=None and key is missing.""" + assert empty_arc.pop('not_in_cache', default=None) is None + +def test_keyerror_on_missing_and_no_default(empty_arc): + """Test that pop() raises KeyError if no default is given and key is missing.""" + with 
pytest.raises(KeyError): + empty_arc.pop('never-there') def test_clear_removes_all_keys(empty_arc): - """Test clear() empties all lists and resets p.""" + """Test that clear() empties all lists and resets p.""" empty_arc['a'] = 1 empty_arc['b'] = 2 empty_arc.clear() @@ -66,114 +115,180 @@ def test_clear_removes_all_keys(empty_arc): assert len(empty_arc.b2) == 0 assert empty_arc.p == 0 +def test_clear_on_empty_cache(empty_arc): + """Test clear() does not raise when cache is already empty.""" + empty_arc.clear() + assert len(empty_arc) == 0 + +# ----------- Eviction and Promotion Mechanics ----------- + def test_evict_internal_evicts_when_over_capacity(): - """Test _evict_internal evicts oldest when ARC is full.""" - evicted = [] - c = ARC(maxsize=2, on_evict=lambda keys: evicted.extend(keys)) + """Test that _evict_internal evicts oldest when ARC is full.""" + c = ARC(maxsize=2) c['a'] = 1 c['b'] = 2 c['c'] = 3 assert len(c) == 2 + with c._rw_lock.gen_rlock(): + present = set(c.t1.keys()) | set(c.t2.keys()) + assert 'c' in present + assert len(present & {'a', 'b'}) == 1 + +def test_getitem_after_eviction(empty_arc): + """Test that accessing an evicted key raises KeyError.""" + empty_arc['x'] = 1 + empty_arc['y'] = 2 + empty_arc['z'] = 3 + empty_arc['w'] = 4 + empty_arc['v'] = 5 # Triggers eviction + evicted = [k for k in ['x', 'y', 'z', 'w'] if k not in empty_arc] assert len(evicted) >= 1 for k in evicted: - assert k in ['a', 'b'] - -def test_eviction_callback_is_called(): - """Test on_evict callback is called on eviction.""" - evicted = [] - c = ARC(maxsize=2, on_evict=lambda keys: evicted.extend(keys)) - c['x'] = 1 - c['y'] = 2 - c['z'] = 3 - assert len(evicted) > 0 - assert all(isinstance(k, str) for k in evicted) + with pytest.raises(KeyError): + _ = empty_arc[k] + +def test_setitem_multiple_evictions(): + """Test that multiple evictions in sequence do not corrupt the cache.""" + c = ARC(maxsize=2) + c['a'] = 1 + c['b'] = 2 + c['c'] = 3 + c['d'] = 4 + assert len(c) == 2 + with c._rw_lock.gen_rlock(): + present = set(c.t1.keys()) | set(c.t2.keys()) + assert len(present & {'a', 'b', 'c', 'd'}) == 2 + +def test_setitem_eviction_does_not_raise(empty_arc): + """Test that inserting over maxsize repeatedly never raises.""" + try: + for i in range(20): + empty_arc[f'k{i}'] = i + except Exception: + pytest.fail("Eviction during setitem should never raise.") def test_promote_from_t1_to_t2(empty_arc): """Test that accessing key in T1 promotes it to T2.""" empty_arc['foo'] = 10 - assert 'foo' in empty_arc.t1 + with empty_arc._rw_lock.gen_rlock(): + assert 'foo' in empty_arc.t1 _ = empty_arc['foo'] - assert 'foo' in empty_arc.t2 + with empty_arc._rw_lock.gen_rlock(): + assert 'foo' in empty_arc.t2 def test_refresh_in_t2_updates_order(empty_arc): - """Test that repeated access in T2 keeps item in T2.""" + """Test that repeated access in T2 keeps item in T2 and updates order.""" empty_arc['x'] = 1 - _ = empty_arc['x'] # promote to T2 + _ = empty_arc['x'] empty_arc['y'] = 2 - _ = empty_arc['x'] # refresh x in T2 - assert 'x' in empty_arc.t2 - assert empty_arc.t2.popitem(last=True)[0] == 'x' + _ = empty_arc['x'] + with empty_arc._rw_lock.gen_rlock(): + assert 'x' in empty_arc.t2 + assert empty_arc.t2.popitem(last=True)[0] == 'x' + +def test_setitem_duplicate_key_resets_position(empty_arc): + """Test that setting the same key again resets its position.""" + empty_arc['x'] = 10 + empty_arc['y'] = 11 + empty_arc['x'] = 99 + with empty_arc._rw_lock.gen_rlock(): + assert list(empty_arc.t1.keys())[-1] == 'x' + 
assert empty_arc['x'] == 99 def test_hit_in_ghost_lists_promotes_to_t2(empty_arc): - """Test access in ghost list B1 promotes key to T2.""" + """Test that access in ghost list B1 promotes key to T2.""" empty_arc['a'] = 1 empty_arc['b'] = 2 - empty_arc['c'] = 3 # triggers eviction to ghost B1 - if empty_arc.b1: - ghost_key = next(iter(empty_arc.b1)) + empty_arc['c'] = 3 + with empty_arc._rw_lock.gen_rlock(): + b1_keys = list(empty_arc.b1.keys()) + if b1_keys: + ghost_key = b1_keys[0] empty_arc.__missing__ = lambda key: 999 _ = empty_arc[ghost_key] - assert ghost_key in empty_arc.t2 - -def test_iter_lists_keys_in_order(arc_with_data): - """Test __iter__ yields keys from T1 and then T2.""" - arc_with_data['c'] = 3 - keys = list(iter(arc_with_data)) - expected = list(arc_with_data.t1.keys()) + list(arc_with_data.t2.keys()) - assert keys == expected - -def test_repr_outputs_status(empty_arc): - """Test __repr__ returns a string with cache stats.""" - r = repr(empty_arc) - assert r.startswith("ARC(") - assert "maxsize" in r - -# ----------- Additional/Edge case tests ----------- - -def test_pop_missing_key_returns_default(empty_arc): - """Test that pop() returns default if key not found in any list.""" - assert empty_arc.pop('missing', default='sentinel') == 'sentinel' - -def test_setitem_multiple_evictions(): - """Test multiple evictions in sequence do not corrupt the cache.""" - evicted = [] - c = ARC(maxsize=2, on_evict=lambda keys: evicted.extend(keys)) - c['a'] = 1 - c['b'] = 2 - c['c'] = 3 - c['d'] = 4 - assert len(c) == 2 - assert len(evicted) >= 2 - assert all(isinstance(k, str) for k in evicted) + with empty_arc._rw_lock.gen_rlock(): + assert ghost_key in empty_arc.t2 def test_access_promotes_b1_and_b2(empty_arc): - """Test accessing a key in B1 or B2 increases/decreases p appropriately.""" + """Test that accessing a key in B1 or B2 increases/decreases p appropriately.""" empty_arc['a'] = 1 empty_arc['b'] = 2 empty_arc['c'] = 3 - if empty_arc.b1: - ghost_key = next(iter(empty_arc.b1)) + with empty_arc._rw_lock.gen_rlock(): + b1_keys = list(empty_arc.b1.keys()) + if b1_keys: + ghost_key = b1_keys[0] empty_arc.__missing__ = lambda key: 555 - p_before = empty_arc.p + with empty_arc._rw_lock.gen_rlock(): + p_before = empty_arc.p _ = empty_arc[ghost_key] - assert empty_arc.p > p_before or empty_arc.p == empty_arc.maxsize + with empty_arc._rw_lock.gen_rlock(): + assert empty_arc.p > p_before or empty_arc.p == empty_arc.maxsize + +def test_evict_when_t1_empty_promotes_t2(empty_arc): + """ + Test that promoting an item to T2 keeps it in cache, while new items fill T1. 
+ """ + empty_arc['a'] = 1 + _ = empty_arc['a'] # promote to T2 + empty_arc['b'] = 2 + empty_arc['c'] = 3 + empty_arc['d'] = 4 + empty_arc['e'] = 5 # should cause evictions + with empty_arc._rw_lock.gen_rlock(): + assert 'a' in empty_arc.t2 + t1_keys = list(empty_arc.t1.keys()) + t2_keys = list(empty_arc.t2.keys()) + assert len(t1_keys) + len(t2_keys) == empty_arc.maxsize + assert 'a' not in t1_keys + for k in t1_keys: + assert k in ['b', 'c', 'd', 'e'] def test_active_and_ghost_lists_dont_exceed_maxsize(): - """Test ghost lists (B1/B2) and active lists (T1/T2) don't exceed maxsize.""" + """Test that ghost lists (B1/B2) and active lists (T1/T2) don't exceed maxsize.""" c = ARC(maxsize=3) for k in ['a', 'b', 'c', 'd', 'e']: c[k] = ord(k) - # B1 + B2 should never be larger than maxsize - assert len(c.b1) + len(c.b2) <= c.maxsize - assert len(c.t1) + len(c.t2) <= c.maxsize + with c._rw_lock.gen_rlock(): + assert len(c.b1) + len(c.b2) <= c.maxsize + assert len(c.t1) + len(c.t2) <= c.maxsize def test_clear_resets_all_lists_and_p(empty_arc): """Test that clear() resets all lists and p after many ops.""" for k in 'abcd': empty_arc[k] = ord(k) empty_arc.clear() - assert not any([empty_arc.t1, empty_arc.t2, empty_arc.b1, empty_arc.b2]) - assert empty_arc.p == 0 + with empty_arc._rw_lock.gen_rlock(): + assert not any([empty_arc.t1, empty_arc.t2, empty_arc.b1, empty_arc.b2]) + assert empty_arc.p == 0 + +# ----------- Iteration and Representation ----------- + +def test_iter_lists_keys_in_order(arc_with_data): + """Test that __iter__ yields keys from T1 and then T2.""" + arc_with_data['c'] = 3 + keys = list(iter(arc_with_data)) + with arc_with_data._rw_lock.gen_rlock(): + expected = list(arc_with_data.t1.keys()) + list(arc_with_data.t2.keys()) + assert keys == expected + +def test_iter_empty_arc(empty_arc): + """Test that iterating an empty ARC yields nothing.""" + assert list(iter(empty_arc)) == [] + +def test_repr_outputs_status(empty_arc): + """Test that __repr__ returns a string with cache stats.""" + r = repr(empty_arc) + assert r.startswith("ARC(") + assert "maxsize" in r + +def test_repr_reflects_content(empty_arc): + """Test __repr__ shows correct lengths after operations.""" + empty_arc['foo'] = 123 + empty_arc['bar'] = 321 + r = repr(empty_arc) + assert f"len={len(empty_arc)}" in r + assert f"t1_len={len(empty_arc.t1)}" in r def test_repr_is_informative(empty_arc): """Test that __repr__ outputs all important stats.""" @@ -181,28 +296,271 @@ def test_repr_is_informative(empty_arc): r = repr(empty_arc) assert "t1_len" in r and "b1_len" in r and "p=" in r -def test_setitem_duplicate_key_resets_position(empty_arc): - """Test that setting the same key again resets its position.""" - empty_arc['x'] = 10 - empty_arc['y'] = 11 - empty_arc['x'] = 99 - # x should be last in t1 - assert list(empty_arc.t1.keys())[-1] == 'x' - assert empty_arc['x'] == 99 +# ----------- Concurrency and Locking ----------- -def test_eviction_of_promoted_key(): - """Test that a key promoted to T2 can still be evicted if capacity is exceeded.""" - evicted = [] - c = ARC(maxsize=2, on_evict=lambda keys: evicted.extend(keys)) - c['a'] = 1 - c['b'] = 2 - _ = c['a'] # Promote 'a' to T2 - c['c'] = 3 # Evict one of the keys - assert len(c) == 2 - assert len(evicted) >= 1 +def test_concurrent_setitem_and_getitem(): + """Test concurrent __setitem__ and __getitem__ do not corrupt cache state.""" + arc = ARC(maxsize=8) + keys = [f'k{i}' for i in range(16)] + exceptions = [] -def test_keyerror_on_missing_and_no_default(empty_arc): - 
"""Test pop() raises KeyError if no default is given and key is missing.""" - with pytest.raises(KeyError): - empty_arc.pop('never-there') + def writer(): + for k in keys: + try: + arc[k] = ord(k[-1]) + time.sleep(0.001) + except Exception as e: + exceptions.append(e) + + def reader(): + for _ in range(20): + for k in keys: + try: + _ = arc.get(k, None) + except Exception as e: + exceptions.append(e) + time.sleep(0.001) + + threads = [threading.Thread(target=writer)] + [threading.Thread(target=reader) for _ in range(4)] + for t in threads: + t.start() + for t in threads: + t.join() + assert not exceptions, f"Exceptions in threads: {exceptions}" + +def test_concurrent_pop_and_setitem(): + """Test concurrent pop and setitem maintain cache integrity.""" + arc = ARC(maxsize=5) + for k in range(5): + arc[f'x{k}'] = k + + pop_exceptions = [] + set_exceptions = [] + + def popper(): + for _ in range(10): + try: + arc.pop(f'x{_ % 5}', default=None) + except KeyError: + pass # Acceptable in concurrency + except Exception as e: + pop_exceptions.append(e) + + def setter(): + for i in range(10, 20): + try: + arc[f'x{i % 5}'] = i + except Exception as e: + set_exceptions.append(e) + + t1 = threading.Thread(target=popper) + t2 = threading.Thread(target=setter) + t1.start() + t2.start() + t1.join() + t2.join() + assert not set_exceptions, f"Exceptions in setter: {set_exceptions}" + assert not pop_exceptions, f"Unexpected exceptions in popper: {pop_exceptions}" + +def test_concurrent_iterators_with_modifications(): + """Test that iterating keys while modifying cache is safe and doesn't throw.""" + arc = ARC(maxsize=7) + for i in range(7): + arc[f'k{i}'] = i + + iter_exceptions = [] + + def iterate(): + try: + for _ in range(10): + list(iter(arc)) + time.sleep(0.002) + except Exception as e: + iter_exceptions.append(e) + + def modifier(): + for i in range(10, 20): + arc[f'k{i%7}'] = i + time.sleep(0.001) + + t1 = threading.Thread(target=iterate) + t2 = threading.Thread(target=modifier) + t1.start() + t2.start() + t1.join() + t2.join() + assert not iter_exceptions, f"Exceptions during concurrent iteration: {iter_exceptions}" + +def test_concurrent_clear_and_get(): + """Test that clear and getitem do not deadlock or throw under concurrent access.""" + arc = ARC(maxsize=10) + for i in range(10): + arc[f'c{i}'] = i + + clear_exceptions = [] + get_exceptions = [] + + def clearer(): + for _ in range(5): + try: + arc.clear() + time.sleep(0.005) + except Exception as e: + clear_exceptions.append(e) + + def getter(): + for _ in range(15): + for i in range(10): + try: + _ = arc.get(f'c{i}', None) + except Exception as e: + get_exceptions.append(e) + time.sleep(0.001) + + t1 = threading.Thread(target=clearer) + t2 = threading.Thread(target=getter) + t1.start() + t2.start() + t1.join() + t2.join() + assert not clear_exceptions and not get_exceptions, f"Exceptions: {clear_exceptions}, {get_exceptions}" + +def test_lock_allows_multiple_readers_but_exclusive_writer(): + """Test rwlock allows multiple readers at once but only one writer at a time.""" + arc = ARC(maxsize=4) + shared_counter = 0 + read_count = [] + write_count = [] + + def reader(): + nonlocal shared_counter + with arc._rw_lock.gen_rlock(): + val = shared_counter + time.sleep(0.002) + read_count.append(val) + + def writer(): + nonlocal shared_counter + with arc._rw_lock.gen_wlock(): + current = shared_counter + shared_counter = current + 1 + time.sleep(0.003) + write_count.append(shared_counter) + + threads = [] + for _ in range(3): + 
threads.append(threading.Thread(target=reader)) + threads.append(threading.Thread(target=writer)) + for t in threads: + t.start() + for t in threads: + t.join() + assert any(r == read_count[0] for r in read_count[1:]), "Multiple readers did not overlap" + assert len(write_count) == 1 + +def test_concurrent_duplicate_setitem(): + """Test that concurrent setitem on the same key does not corrupt cache.""" + arc = ARC(maxsize=3) + exception_list = [] + def setter(val): + try: + for _ in range(20): + arc['dup'] = val + time.sleep(0.0005) + except Exception as e: + exception_list.append(e) + + threads = [threading.Thread(target=setter, args=(v,)) for v in range(3)] + for t in threads: + t.start() + for t in threads: + t.join() + assert 'dup' in arc + assert arc['dup'] in (0, 1, 2) + assert not exception_list, f"Exceptions: {exception_list}" + +def test_concurrent_clear_and_setitem(): + """Test that clear and setitem do not cause errors in parallel.""" + arc = ARC(maxsize=4) + set_errors = [] + clear_errors = [] + stop = threading.Event() + + def clearer(): + while not stop.is_set(): + try: + arc.clear() + time.sleep(0.001) + except Exception as e: + clear_errors.append(e) + + def setter(): + for i in range(30): + try: + arc[f'k{i%4}'] = i + time.sleep(0.0005) + except Exception as e: + set_errors.append(e) + stop.set() + + t1 = threading.Thread(target=clearer) + t2 = threading.Thread(target=setter) + t1.start() + t2.start() + t1.join() + t2.join() + assert not set_errors, f"Setitem exceptions: {set_errors}" + assert not clear_errors, f"Clear exceptions: {clear_errors}" + +def test_concurrent_pop_with_contention(): + """Test multiple threads popping the same and different keys.""" + arc = ARC(maxsize=2) + for k in ('a', 'b'): + arc[k] = k + errors = [] + def popper(key): + for _ in range(8): + try: + arc.pop(key, default=None) + time.sleep(0.0005) + except Exception as e: + errors.append(e) + t1 = threading.Thread(target=popper, args=('a',)) + t2 = threading.Thread(target=popper, args=('b',)) + t3 = threading.Thread(target=popper, args=('a',)) + t1.start() + t2.start() + t3.start() + t1.join() + t2.join() + t3.join() + assert not errors, f"Exceptions: {errors}" + +def test_concurrent_len_and_setitem(): + """Test that __len__ and __setitem__ are safe in concurrent usage.""" + arc = ARC(maxsize=10) + errors = [] + stop = threading.Event() + def setter(): + for i in range(50): + try: + arc[f'k{i%10}'] = i + time.sleep(0.0002) + except Exception as e: + errors.append(e) + stop.set() + def length_checker(): + while not stop.is_set(): + try: + _ = len(arc) + time.sleep(0.0002) + except Exception as e: + errors.append(e) + t1 = threading.Thread(target=setter) + t2 = threading.Thread(target=length_checker) + t1.start() + t2.start() + t1.join() + t2.join() + assert not errors, f"Exceptions: {errors}" diff --git a/tests/test_wtinylfu_cache.py b/tests/test_wtinylfu_cache.py index 21a25d1..3218d0a 100644 --- a/tests/test_wtinylfu_cache.py +++ b/tests/test_wtinylfu_cache.py @@ -1,12 +1,18 @@ import pytest +import threading +import time from modelcache.manager.eviction.wtinylfu_cache import W2TinyLFU, CountMinSketch +# ----------- Fixtures ----------- + @pytest.fixture() def empty_cache(): + # Returns an empty W2TinyLFU cache for tests return W2TinyLFU(maxsize=10, window_pct=50) @pytest.fixture() def cache_with_data(): + # Returns a W2TinyLFU cache with two preset items c = W2TinyLFU(maxsize=10, window_pct=50) c['a'] = 1 c['b'] = 2 @@ -14,8 +20,11 @@ def cache_with_data(): @pytest.fixture() def cms(): + # 
Returns a CountMinSketch for CMS tests return CountMinSketch(width=16, depth=2, decay_interval=5) +# ----------- Basic Functionality ----------- + def test_setitem_adds_to_cache(empty_cache): """Test that __setitem__ adds a new key-value pair to the cache.""" empty_cache['x'] = 100 @@ -78,53 +87,30 @@ def test_window_size_clamped_at_least_one(): c['a'] = 1 assert len(c.window) <= c.window_size -def test_eviction_callback_at_least_one_eviction(): - """Test that eviction callback is called when capacity exceeded.""" - evicted = [] - c = W2TinyLFU(maxsize=3, window_pct=33, on_evict=evicted.append) - c['a'] = 1 - c['b'] = 2 - c['c'] = 3 - c['d'] = 4 - assert len(evicted) >= 1 - assert all(k in ('a', 'b', 'c', 'd') for k in evicted) - -def test_eviction_callback_evicted_keys_are_gone(): - """Test that evicted keys are not present in the cache.""" - evicted = [] - c = W2TinyLFU(maxsize=3, window_pct=33, on_evict=evicted.append) - keys = ['a', 'b', 'c', 'd'] - for i, k in enumerate(keys): - c[k] = i - for k in evicted: - assert k not in c - -def test_eviction_callback_not_called_when_under_capacity(): - """Test that eviction callback is not called when under capacity.""" - evicted = [] - c = W2TinyLFU(maxsize=4, window_pct=50, on_evict=evicted.append) - c['one'] = 1 - c['two'] = 2 - assert len(evicted) == 0 - -def test_eviction_callback_with_chain_eviction(): - """Test that multiple evictions can occur in chain scenarios.""" - evicted = [] - c = W2TinyLFU(maxsize=2, window_pct=50, on_evict=evicted.append) - c['x'] = 1 - c['y'] = 2 - c['z'] = 3 - c['w'] = 4 - assert set(evicted).issubset({'x', 'y', 'z', 'w'}) - assert len(evicted) >= 1 +def test_clear_empties_cache(empty_cache): + """Test that clear() empties the cache.""" + empty_cache['x'] = 1 + empty_cache['y'] = 2 + empty_cache.clear() + assert len(empty_cache.data) == 0 + assert list(empty_cache.window) == [] + assert list(empty_cache.probation) == [] + assert list(empty_cache.protected) == [] + +# ----------- Eviction and Frequency ----------- def test_evicted_key_not_accessible(): """Test that accessing an evicted key raises KeyError.""" c = W2TinyLFU(maxsize=2) c['a'], c['b'] = 1, 2 c['c'] = 3 + # Only two elements can be present, so one must be gone with pytest.raises(KeyError): - _ = c['a'] + # Either 'a' or 'b' must be gone; test both if needed + try: + _ = c['a'] + except KeyError: + _ = c['b'] def test_eviction_until_empty_and_reusability(): """Test cache remains usable after all items have been evicted.""" @@ -152,7 +138,9 @@ def test_low_freq_key_evicted(): c['b'] = 2 c['c'] = 3 c['d'] = 4 - assert 'a' not in c + # At least one of the early keys must be gone (usually 'a') + evicted = {'a', 'b', 'c'} - set([k for k in c]) + assert len(evicted) >= 1 @pytest.mark.parametrize("hits, expected_in_cache", [ (0, False), @@ -168,10 +156,7 @@ def test_access_promotes_key(hits, expected_in_cache): if i < hits: c.get('a') c[k] = ord(k) - if hits == 0: - assert 'a' not in c - else: - assert 'a' in c + assert ('a' in c) == expected_in_cache def test_cms_estimate_increases_and_decays(cms): """Test that CountMinSketch increases and decays correctly.""" @@ -183,6 +168,8 @@ def test_cms_estimate_increases_and_decays(cms): after_decay = cms.estimate('test') assert after_decay <= after_add +# ----------- Segment and Admission Logic ----------- + def test_admit_to_main_adds_to_probation(): """Test _admit_to_main adds new key to probation segment.""" c = W2TinyLFU(maxsize=10, window_pct=50) @@ -194,17 +181,17 @@ def 
test_admit_to_main_adds_to_probation(): def test_admit_to_main_evicts_when_probation_full(): """Test _admit_to_main evicts LRU key when probation is full.""" - evicted = [] - c = W2TinyLFU(maxsize=4, window_pct=25, on_evict=evicted.append) + c = W2TinyLFU(maxsize=4, window_pct=25) for i in range(c.probation_size): c._admit_to_main(f'p{i}') c.data[f'p{i}'] = i extra = 'extra' c.data[extra] = 999 + old_probation_keys = set(c.probation.keys()) c._admit_to_main(extra) assert len(c.probation) == c.probation_size assert extra in c.probation - assert len(evicted) >= 1 + assert len(old_probation_keys - set(c.probation.keys())) >= 1 def test_admit_to_main_noop_if_already_present(): """Test _admit_to_main does nothing if key already present.""" @@ -227,11 +214,195 @@ def test_put_key_in_window_and_main(): total = len(c.window) + len(c.probation) + len(c.protected) assert total <= c.maxsize -def test_put_eviction_callback_called(): - """Test that the eviction callback is invoked when needed.""" - evicted = [] - c = W2TinyLFU(maxsize=2, on_evict=evicted.append) - c['a'] = 1 - c['b'] = 2 - c['c'] = 3 - assert len(evicted) >= 1 +# ----------- Extra Edge Cases ----------- + +def test_clear_on_empty_cache(): + """Test clear() on an already empty cache does not fail.""" + c = W2TinyLFU(maxsize=3) + c.clear() + assert len(c.data) == 0 + +def test_get_returns_default_if_not_present(): + """Test get() returns default if the key is missing.""" + c = W2TinyLFU(maxsize=3) + assert c.get('notfound', default=777) == 777 + +def test_cache_survives_rapid_inserts_and_deletes(): + """Test cache remains consistent under rapid inserts/deletes.""" + c = W2TinyLFU(maxsize=3) + for i in range(30): + c[f'k{i%3}'] = i + if i % 2 == 0: + del c[f'k{(i+1)%3}'] + assert len(c.data) <= 3 + +def test_cache_never_exceeds_maxsize(): + """Test that cache never exceeds its declared maxsize.""" + c = W2TinyLFU(maxsize=5) + for i in range(20): + c[f'x{i}'] = i + assert len(c.window) + len(c.probation) + len(c.protected) <= 5 + +# ----------- Concurrency Tests ----------- + +def test_concurrent_setitem_and_getitem(): + """Test concurrent __setitem__ and __getitem__ do not corrupt cache state.""" + cache = W2TinyLFU(maxsize=8, window_pct=50) + keys = [f'k{i}' for i in range(16)] + exceptions = [] + + def writer(): + for k in keys: + try: + cache[k] = ord(k[-1]) + time.sleep(0.001) + except Exception as e: + exceptions.append(e) + + def reader(): + for _ in range(20): + for k in keys: + try: + _ = cache.get(k, None) + except Exception as e: + exceptions.append(e) + time.sleep(0.001) + + threads = [threading.Thread(target=writer)] + [threading.Thread(target=reader) for _ in range(4)] + for t in threads: + t.start() + for t in threads: + t.join() + assert not exceptions, f"Exceptions in threads: {exceptions}" + +def test_concurrent_delitem_and_setitem(): + """Test concurrent __delitem__ and __setitem__ do not cause errors or corruption.""" + cache = W2TinyLFU(maxsize=6, window_pct=50) + for k in range(6): + cache[f'x{k}'] = k + + del_exceptions = [] + set_exceptions = [] + + def deleter(): + for _ in range(12): + try: + del cache[f'x{_ % 6}'] + except KeyError: + pass # Acceptable: may not be present + except Exception as e: + del_exceptions.append(e) + time.sleep(0.001) + + def setter(): + for i in range(12, 24): + try: + cache[f'x{i % 6}'] = i + except Exception as e: + set_exceptions.append(e) + time.sleep(0.001) + + t1 = threading.Thread(target=deleter) + t2 = threading.Thread(target=setter) + t1.start() + t2.start() + t1.join() + 
t2.join() + assert not del_exceptions, f"Exceptions in deleter: {del_exceptions}" + assert not set_exceptions, f"Exceptions in setter: {set_exceptions}" + +def test_concurrent_iterators_with_modifications(): + """Test that iterating over keys while modifying the cache does not throw.""" + cache = W2TinyLFU(maxsize=7, window_pct=50) + for i in range(7): + cache[f'k{i}'] = i + + iter_exceptions = [] + + def iterate(): + try: + for _ in range(10): + list(cache.data.keys()) + time.sleep(0.002) + except Exception as e: + iter_exceptions.append(e) + + def modifier(): + for i in range(10, 20): + cache[f'k{i%7}'] = i + time.sleep(0.001) + + t1 = threading.Thread(target=iterate) + t2 = threading.Thread(target=modifier) + t1.start() + t2.start() + t1.join() + t2.join() + assert not iter_exceptions, f"Exceptions during concurrent iteration: {iter_exceptions}" + +def test_concurrent_clear_and_get(): + """Test that clear and get do not deadlock or throw under concurrent access.""" + cache = W2TinyLFU(maxsize=10, window_pct=50) + for i in range(10): + cache[f'c{i}'] = i + + clear_exceptions = [] + get_exceptions = [] + + def clearer(): + for _ in range(5): + try: + cache.clear() + time.sleep(0.005) + except Exception as e: + clear_exceptions.append(e) + + def getter(): + for _ in range(15): + for i in range(10): + try: + _ = cache.get(f'c{i}', None) + except Exception as e: + get_exceptions.append(e) + time.sleep(0.001) + + t1 = threading.Thread(target=clearer) + t2 = threading.Thread(target=getter) + t1.start() + t2.start() + t1.join() + t2.join() + assert not clear_exceptions, f"Exceptions: {clear_exceptions}" + assert not get_exceptions, f"Exceptions: {get_exceptions}" + +def test_lock_allows_multiple_readers_but_exclusive_writer(): + """Test rwlock allows multiple readers at once but only one writer at a time.""" + cache = W2TinyLFU(maxsize=4, window_pct=50) + shared_counter = [0] + read_count = [] + write_count = [] + + def reader(): + with cache._rw_lock.gen_rlock(): + val = shared_counter[0] + time.sleep(0.002) + read_count.append(val) + + def writer(): + with cache._rw_lock.gen_wlock(): + current = shared_counter[0] + shared_counter[0] = current + 1 + time.sleep(0.003) + write_count.append(shared_counter[0]) + + threads = [] + for _ in range(3): + threads.append(threading.Thread(target=reader)) + threads.append(threading.Thread(target=writer)) + for t in threads: + t.start() + for t in threads: + t.join() + assert any(r == read_count[0] for r in read_count[1:]), "Multiple readers did not overlap" + assert len(write_count) == 1 + diff --git a/websocket4modelcache.py b/websocket4modelcache.py index fe99cd5..4543f03 100644 --- a/websocket4modelcache.py +++ b/websocket4modelcache.py @@ -15,7 +15,7 @@ async def lifespan(app: FastAPI): sql_storage="mysql", vector_storage="milvus", embedding_model=EmbeddingModel.HUGGINGFACE_ALL_MPNET_BASE_V2, - embedding_workers_num=2 + embedding_workers_num=6 ) yield From d8afc32c4fa1d04df1fe39839280133f13b3113d Mon Sep 17 00:00:00 2001 From: Yuval-Roth Date: Mon, 16 Jun 2025 13:20:32 +0300 Subject: [PATCH 95/98] Updated packages in requirements.txt to support python 12 --- requirements.txt | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/requirements.txt b/requirements.txt index 2db7121..91ab0f6 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,24 +1,24 @@ cachetools==5.3.1 DBUtils==1.4 -Flask==3.0.0 -numpy==1.24.4 -onnxruntime==1.16.1 +Flask==3.1.1 +numpy==2.2.6 +onnxruntime==1.22.0 openai==0.28.1 pymilvus==2.5.9 
PyMySQL==1.1.1 Requests==2.32.3 torch==2.7.0 -transformers==4.44.2 -faiss-cpu==1.7.4 +transformers==4.52.4 +faiss-cpu==1.11.0 redis==5.0.1 modelscope==1.26.0 -fastapi==0.115.12 +fastapi==0.115.9 uvicorn==0.34.3 -chromadb==0.5.23 +chromadb==1.0.12 elasticsearch==7.10.0 snowflake-id==1.0.2 -flagembedding==1.3.4 +flagembedding==1.3.5 cryptography==45.0.2 sentence-transformers==4.1.0 -pytest>=8.0 +pytest==8.0 readerwriterlock==1.0.9 From 29c9e132820bc73d028226c59b491c01945a07c6 Mon Sep 17 00:00:00 2001 From: llbbl Date: Wed, 25 Jun 2025 08:24:04 -0500 Subject: [PATCH 96/98] feat: Set up comprehensive Python testing infrastructure with Poetry - Migrated from requirements.txt to Poetry package management - Added pytest, pytest-cov, and pytest-mock as dev dependencies - Configured pytest with coverage reporting (80% threshold) - Created test directory structure with unit/integration folders - Added comprehensive shared fixtures in conftest.py - Set up test markers for unit, integration, and slow tests - Created validation tests to verify infrastructure setup - Updated .gitignore with testing and Claude-related entries - Configured Poetry scripts for running tests (test/tests commands) --- .gitignore | 46 +- poetry.lock | 7703 ++++++++++++++++++++++++++++++++ pyproject.toml | 101 + tests/__init__.py | 0 tests/conftest.py | 218 + tests/integration/__init__.py | 0 tests/test_setup_validation.py | 143 + tests/unit/__init__.py | 0 8 files changed, 8210 insertions(+), 1 deletion(-) create mode 100644 poetry.lock create mode 100644 pyproject.toml create mode 100644 tests/__init__.py create mode 100644 tests/conftest.py create mode 100644 tests/integration/__init__.py create mode 100644 tests/test_setup_validation.py create mode 100644 tests/unit/__init__.py diff --git a/.gitignore b/.gitignore index 5d4a0f7..f698979 100644 --- a/.gitignore +++ b/.gitignore @@ -145,4 +145,48 @@ dmypy.json **/model/ /data/milvus/db -/data/mysql/db \ No newline at end of file +/data/mysql/db + +# Testing +.pytest_cache/ +.coverage +.coverage.* +htmlcov/ +coverage.xml +*.py,cover +.hypothesis/ +pytest_cache/ +test-results/ +.tox/ +.nox/ + +# Claude +.claude/* + +# Poetry +dist/ + +# Virtual environments +.venv/ +venv/ +ENV/ +env/ +.env + +# IDE +.vscode/ +.idea/ +*.swp +*.swo +*~ + +# OS +.DS_Store +Thumbs.db + +# Temporary files +*.tmp +*.bak +*.orig +tmp/ +temp/ \ No newline at end of file diff --git a/poetry.lock b/poetry.lock new file mode 100644 index 0000000..c2783bc --- /dev/null +++ b/poetry.lock @@ -0,0 +1,7703 @@ +# This file is automatically @generated by Poetry 2.1.3 and should not be changed by hand. + +[[package]] +name = "addict" +version = "2.4.0" +description = "Addict is a dictionary whose items can be set using both attribute and item syntax." 
+optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "addict-2.4.0-py3-none-any.whl", hash = "sha256:249bb56bbfd3cdc2a004ea0ff4c2b6ddc84d53bc2194761636eb314d5cfa5dfc"}, + {file = "addict-2.4.0.tar.gz", hash = "sha256:b3b2210e0e067a281f5646c8c5db92e99b7231ea8b0eb5f74dbdf9e259d4e494"}, +] + +[[package]] +name = "aiohappyeyeballs" +version = "2.4.4" +description = "Happy Eyeballs for asyncio" +optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "python_version < \"3.10\"" +files = [ + {file = "aiohappyeyeballs-2.4.4-py3-none-any.whl", hash = "sha256:a980909d50efcd44795c4afeca523296716d50cd756ddca6af8c65b996e27de8"}, + {file = "aiohappyeyeballs-2.4.4.tar.gz", hash = "sha256:5fdd7d87889c63183afc18ce9271f9b0a7d32c2303e394468dd45d514a757745"}, +] + +[[package]] +name = "aiohappyeyeballs" +version = "2.6.1" +description = "Happy Eyeballs for asyncio" +optional = false +python-versions = ">=3.9" +groups = ["main"] +markers = "python_version >= \"3.10\"" +files = [ + {file = "aiohappyeyeballs-2.6.1-py3-none-any.whl", hash = "sha256:f349ba8f4b75cb25c99c5c2d84e997e485204d2902a9597802b0371f09331fb8"}, + {file = "aiohappyeyeballs-2.6.1.tar.gz", hash = "sha256:c3f9d0113123803ccadfdf3f0faa505bc78e6a72d1cc4806cbd719826e943558"}, +] + +[[package]] +name = "aiohttp" +version = "3.10.11" +description = "Async http client/server framework (asyncio)" +optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "python_version < \"3.10\"" +files = [ + {file = "aiohttp-3.10.11-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:5077b1a5f40ffa3ba1f40d537d3bec4383988ee51fbba6b74aa8fb1bc466599e"}, + {file = "aiohttp-3.10.11-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8d6a14a4d93b5b3c2891fca94fa9d41b2322a68194422bef0dd5ec1e57d7d298"}, + {file = "aiohttp-3.10.11-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ffbfde2443696345e23a3c597049b1dd43049bb65337837574205e7368472177"}, + {file = "aiohttp-3.10.11-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20b3d9e416774d41813bc02fdc0663379c01817b0874b932b81c7f777f67b217"}, + {file = "aiohttp-3.10.11-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2b943011b45ee6bf74b22245c6faab736363678e910504dd7531a58c76c9015a"}, + {file = "aiohttp-3.10.11-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:48bc1d924490f0d0b3658fe5c4b081a4d56ebb58af80a6729d4bd13ea569797a"}, + {file = "aiohttp-3.10.11-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e12eb3f4b1f72aaaf6acd27d045753b18101524f72ae071ae1c91c1cd44ef115"}, + {file = "aiohttp-3.10.11-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f14ebc419a568c2eff3c1ed35f634435c24ead2fe19c07426af41e7adb68713a"}, + {file = "aiohttp-3.10.11-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:72b191cdf35a518bfc7ca87d770d30941decc5aaf897ec8b484eb5cc8c7706f3"}, + {file = "aiohttp-3.10.11-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:5ab2328a61fdc86424ee540d0aeb8b73bbcad7351fb7cf7a6546fc0bcffa0038"}, + {file = "aiohttp-3.10.11-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:aa93063d4af05c49276cf14e419550a3f45258b6b9d1f16403e777f1addf4519"}, + {file = "aiohttp-3.10.11-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:30283f9d0ce420363c24c5c2421e71a738a2155f10adbb1a11a4d4d6d2715cfc"}, + {file = "aiohttp-3.10.11-cp310-cp310-musllinux_1_2_x86_64.whl", hash = 
"sha256:e5358addc8044ee49143c546d2182c15b4ac3a60be01c3209374ace05af5733d"}, + {file = "aiohttp-3.10.11-cp310-cp310-win32.whl", hash = "sha256:e1ffa713d3ea7cdcd4aea9cddccab41edf6882fa9552940344c44e59652e1120"}, + {file = "aiohttp-3.10.11-cp310-cp310-win_amd64.whl", hash = "sha256:778cbd01f18ff78b5dd23c77eb82987ee4ba23408cbed233009fd570dda7e674"}, + {file = "aiohttp-3.10.11-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:80ff08556c7f59a7972b1e8919f62e9c069c33566a6d28586771711e0eea4f07"}, + {file = "aiohttp-3.10.11-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2c8f96e9ee19f04c4914e4e7a42a60861066d3e1abf05c726f38d9d0a466e695"}, + {file = "aiohttp-3.10.11-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:fb8601394d537da9221947b5d6e62b064c9a43e88a1ecd7414d21a1a6fba9c24"}, + {file = "aiohttp-3.10.11-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2ea224cf7bc2d8856d6971cea73b1d50c9c51d36971faf1abc169a0d5f85a382"}, + {file = "aiohttp-3.10.11-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:db9503f79e12d5d80b3efd4d01312853565c05367493379df76d2674af881caa"}, + {file = "aiohttp-3.10.11-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0f449a50cc33f0384f633894d8d3cd020e3ccef81879c6e6245c3c375c448625"}, + {file = "aiohttp-3.10.11-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82052be3e6d9e0c123499127782a01a2b224b8af8c62ab46b3f6197035ad94e9"}, + {file = "aiohttp-3.10.11-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:20063c7acf1eec550c8eb098deb5ed9e1bb0521613b03bb93644b810986027ac"}, + {file = "aiohttp-3.10.11-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:489cced07a4c11488f47aab1f00d0c572506883f877af100a38f1fedaa884c3a"}, + {file = "aiohttp-3.10.11-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:ea9b3bab329aeaa603ed3bf605f1e2a6f36496ad7e0e1aa42025f368ee2dc07b"}, + {file = "aiohttp-3.10.11-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:ca117819d8ad113413016cb29774b3f6d99ad23c220069789fc050267b786c16"}, + {file = "aiohttp-3.10.11-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:2dfb612dcbe70fb7cdcf3499e8d483079b89749c857a8f6e80263b021745c730"}, + {file = "aiohttp-3.10.11-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f9b615d3da0d60e7d53c62e22b4fd1c70f4ae5993a44687b011ea3a2e49051b8"}, + {file = "aiohttp-3.10.11-cp311-cp311-win32.whl", hash = "sha256:29103f9099b6068bbdf44d6a3d090e0a0b2be6d3c9f16a070dd9d0d910ec08f9"}, + {file = "aiohttp-3.10.11-cp311-cp311-win_amd64.whl", hash = "sha256:236b28ceb79532da85d59aa9b9bf873b364e27a0acb2ceaba475dc61cffb6f3f"}, + {file = "aiohttp-3.10.11-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:7480519f70e32bfb101d71fb9a1f330fbd291655a4c1c922232a48c458c52710"}, + {file = "aiohttp-3.10.11-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f65267266c9aeb2287a6622ee2bb39490292552f9fbf851baabc04c9f84e048d"}, + {file = "aiohttp-3.10.11-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7400a93d629a0608dc1d6c55f1e3d6e07f7375745aaa8bd7f085571e4d1cee97"}, + {file = "aiohttp-3.10.11-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f34b97e4b11b8d4eb2c3a4f975be626cc8af99ff479da7de49ac2c6d02d35725"}, + {file = "aiohttp-3.10.11-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1e7b825da878464a252ccff2958838f9caa82f32a8dbc334eb9b34a026e2c636"}, + {file = 
"aiohttp-3.10.11-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f9f92a344c50b9667827da308473005f34767b6a2a60d9acff56ae94f895f385"}, + {file = "aiohttp-3.10.11-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc6f1ab987a27b83c5268a17218463c2ec08dbb754195113867a27b166cd6087"}, + {file = "aiohttp-3.10.11-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1dc0f4ca54842173d03322793ebcf2c8cc2d34ae91cc762478e295d8e361e03f"}, + {file = "aiohttp-3.10.11-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:7ce6a51469bfaacff146e59e7fb61c9c23006495d11cc24c514a455032bcfa03"}, + {file = "aiohttp-3.10.11-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:aad3cd91d484d065ede16f3cf15408254e2469e3f613b241a1db552c5eb7ab7d"}, + {file = "aiohttp-3.10.11-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:f4df4b8ca97f658c880fb4b90b1d1ec528315d4030af1ec763247ebfd33d8b9a"}, + {file = "aiohttp-3.10.11-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:2e4e18a0a2d03531edbc06c366954e40a3f8d2a88d2b936bbe78a0c75a3aab3e"}, + {file = "aiohttp-3.10.11-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:6ce66780fa1a20e45bc753cda2a149daa6dbf1561fc1289fa0c308391c7bc0a4"}, + {file = "aiohttp-3.10.11-cp312-cp312-win32.whl", hash = "sha256:a919c8957695ea4c0e7a3e8d16494e3477b86f33067478f43106921c2fef15bb"}, + {file = "aiohttp-3.10.11-cp312-cp312-win_amd64.whl", hash = "sha256:b5e29706e6389a2283a91611c91bf24f218962717c8f3b4e528ef529d112ee27"}, + {file = "aiohttp-3.10.11-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:703938e22434d7d14ec22f9f310559331f455018389222eed132808cd8f44127"}, + {file = "aiohttp-3.10.11-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:9bc50b63648840854e00084c2b43035a62e033cb9b06d8c22b409d56eb098413"}, + {file = "aiohttp-3.10.11-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5f0463bf8b0754bc744e1feb61590706823795041e63edf30118a6f0bf577461"}, + {file = "aiohttp-3.10.11-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f6c6dec398ac5a87cb3a407b068e1106b20ef001c344e34154616183fe684288"}, + {file = "aiohttp-3.10.11-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bcaf2d79104d53d4dcf934f7ce76d3d155302d07dae24dff6c9fffd217568067"}, + {file = "aiohttp-3.10.11-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:25fd5470922091b5a9aeeb7e75be609e16b4fba81cdeaf12981393fb240dd10e"}, + {file = "aiohttp-3.10.11-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bbde2ca67230923a42161b1f408c3992ae6e0be782dca0c44cb3206bf330dee1"}, + {file = "aiohttp-3.10.11-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:249c8ff8d26a8b41a0f12f9df804e7c685ca35a207e2410adbd3e924217b9006"}, + {file = "aiohttp-3.10.11-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:878ca6a931ee8c486a8f7b432b65431d095c522cbeb34892bee5be97b3481d0f"}, + {file = "aiohttp-3.10.11-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:8663f7777ce775f0413324be0d96d9730959b2ca73d9b7e2c2c90539139cbdd6"}, + {file = "aiohttp-3.10.11-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:6cd3f10b01f0c31481fba8d302b61603a2acb37b9d30e1d14e0f5a58b7b18a31"}, + {file = "aiohttp-3.10.11-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:4e8d8aad9402d3aa02fdc5ca2fe68bcb9fdfe1f77b40b10410a94c7f408b664d"}, + {file = "aiohttp-3.10.11-cp313-cp313-musllinux_1_2_x86_64.whl", hash = 
"sha256:38e3c4f80196b4f6c3a85d134a534a56f52da9cb8d8e7af1b79a32eefee73a00"}, + {file = "aiohttp-3.10.11-cp313-cp313-win32.whl", hash = "sha256:fc31820cfc3b2863c6e95e14fcf815dc7afe52480b4dc03393c4873bb5599f71"}, + {file = "aiohttp-3.10.11-cp313-cp313-win_amd64.whl", hash = "sha256:4996ff1345704ffdd6d75fb06ed175938c133425af616142e7187f28dc75f14e"}, + {file = "aiohttp-3.10.11-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:74baf1a7d948b3d640badeac333af581a367ab916b37e44cf90a0334157cdfd2"}, + {file = "aiohttp-3.10.11-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:473aebc3b871646e1940c05268d451f2543a1d209f47035b594b9d4e91ce8339"}, + {file = "aiohttp-3.10.11-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c2f746a6968c54ab2186574e15c3f14f3e7f67aef12b761e043b33b89c5b5f95"}, + {file = "aiohttp-3.10.11-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d110cabad8360ffa0dec8f6ec60e43286e9d251e77db4763a87dcfe55b4adb92"}, + {file = "aiohttp-3.10.11-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e0099c7d5d7afff4202a0c670e5b723f7718810000b4abcbc96b064129e64bc7"}, + {file = "aiohttp-3.10.11-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0316e624b754dbbf8c872b62fe6dcb395ef20c70e59890dfa0de9eafccd2849d"}, + {file = "aiohttp-3.10.11-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a5f7ab8baf13314e6b2485965cbacb94afff1e93466ac4d06a47a81c50f9cca"}, + {file = "aiohttp-3.10.11-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c891011e76041e6508cbfc469dd1a8ea09bc24e87e4c204e05f150c4c455a5fa"}, + {file = "aiohttp-3.10.11-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:9208299251370ee815473270c52cd3f7069ee9ed348d941d574d1457d2c73e8b"}, + {file = "aiohttp-3.10.11-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:459f0f32c8356e8125f45eeff0ecf2b1cb6db1551304972702f34cd9e6c44658"}, + {file = "aiohttp-3.10.11-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:14cdc8c1810bbd4b4b9f142eeee23cda528ae4e57ea0923551a9af4820980e39"}, + {file = "aiohttp-3.10.11-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:971aa438a29701d4b34e4943e91b5e984c3ae6ccbf80dd9efaffb01bd0b243a9"}, + {file = "aiohttp-3.10.11-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:9a309c5de392dfe0f32ee57fa43ed8fc6ddf9985425e84bd51ed66bb16bce3a7"}, + {file = "aiohttp-3.10.11-cp38-cp38-win32.whl", hash = "sha256:9ec1628180241d906a0840b38f162a3215114b14541f1a8711c368a8739a9be4"}, + {file = "aiohttp-3.10.11-cp38-cp38-win_amd64.whl", hash = "sha256:9c6e0ffd52c929f985c7258f83185d17c76d4275ad22e90aa29f38e211aacbec"}, + {file = "aiohttp-3.10.11-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:cdc493a2e5d8dc79b2df5bec9558425bcd39aff59fc949810cbd0832e294b106"}, + {file = "aiohttp-3.10.11-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b3e70f24e7d0405be2348da9d5a7836936bf3a9b4fd210f8c37e8d48bc32eca6"}, + {file = "aiohttp-3.10.11-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:968b8fb2a5eee2770eda9c7b5581587ef9b96fbdf8dcabc6b446d35ccc69df01"}, + {file = "aiohttp-3.10.11-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:deef4362af9493d1382ef86732ee2e4cbc0d7c005947bd54ad1a9a16dd59298e"}, + {file = "aiohttp-3.10.11-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:686b03196976e327412a1b094f4120778c7c4b9cff9bce8d2fdfeca386b89829"}, + {file = "aiohttp-3.10.11-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:3bf6d027d9d1d34e1c2e1645f18a6498c98d634f8e373395221121f1c258ace8"}, + {file = "aiohttp-3.10.11-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:099fd126bf960f96d34a760e747a629c27fb3634da5d05c7ef4d35ef4ea519fc"}, + {file = "aiohttp-3.10.11-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c73c4d3dae0b4644bc21e3de546530531d6cdc88659cdeb6579cd627d3c206aa"}, + {file = "aiohttp-3.10.11-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:0c5580f3c51eea91559db3facd45d72e7ec970b04528b4709b1f9c2555bd6d0b"}, + {file = "aiohttp-3.10.11-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:fdf6429f0caabfd8a30c4e2eaecb547b3c340e4730ebfe25139779b9815ba138"}, + {file = "aiohttp-3.10.11-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:d97187de3c276263db3564bb9d9fad9e15b51ea10a371ffa5947a5ba93ad6777"}, + {file = "aiohttp-3.10.11-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:0acafb350cfb2eba70eb5d271f55e08bd4502ec35e964e18ad3e7d34d71f7261"}, + {file = "aiohttp-3.10.11-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:c13ed0c779911c7998a58e7848954bd4d63df3e3575f591e321b19a2aec8df9f"}, + {file = "aiohttp-3.10.11-cp39-cp39-win32.whl", hash = "sha256:22b7c540c55909140f63ab4f54ec2c20d2635c0289cdd8006da46f3327f971b9"}, + {file = "aiohttp-3.10.11-cp39-cp39-win_amd64.whl", hash = "sha256:7b26b1551e481012575dab8e3727b16fe7dd27eb2711d2e63ced7368756268fb"}, + {file = "aiohttp-3.10.11.tar.gz", hash = "sha256:9dc2b8f3dcab2e39e0fa309c8da50c3b55e6f34ab25f1a71d3288f24924d33a7"}, +] + +[package.dependencies] +aiohappyeyeballs = ">=2.3.0" +aiosignal = ">=1.1.2" +async-timeout = {version = ">=4.0,<6.0", markers = "python_version < \"3.11\""} +attrs = ">=17.3.0" +frozenlist = ">=1.1.1" +multidict = ">=4.5,<7.0" +yarl = ">=1.12.0,<2.0" + +[package.extras] +speedups = ["Brotli ; platform_python_implementation == \"CPython\"", "aiodns (>=3.2.0) ; sys_platform == \"linux\" or sys_platform == \"darwin\"", "brotlicffi ; platform_python_implementation != \"CPython\""] + +[[package]] +name = "aiohttp" +version = "3.12.13" +description = "Async http client/server framework (asyncio)" +optional = false +python-versions = ">=3.9" +groups = ["main"] +markers = "python_version >= \"3.10\"" +files = [ + {file = "aiohttp-3.12.13-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:5421af8f22a98f640261ee48aae3a37f0c41371e99412d55eaf2f8a46d5dad29"}, + {file = "aiohttp-3.12.13-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0fcda86f6cb318ba36ed8f1396a6a4a3fd8f856f84d426584392083d10da4de0"}, + {file = "aiohttp-3.12.13-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4cd71c9fb92aceb5a23c4c39d8ecc80389c178eba9feab77f19274843eb9412d"}, + {file = "aiohttp-3.12.13-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:34ebf1aca12845066c963016655dac897651e1544f22a34c9b461ac3b4b1d3aa"}, + {file = "aiohttp-3.12.13-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:893a4639694c5b7edd4bdd8141be296042b6806e27cc1d794e585c43010cc294"}, + {file = "aiohttp-3.12.13-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:663d8ee3ffb3494502ebcccb49078faddbb84c1d870f9c1dd5a29e85d1f747ce"}, + {file = "aiohttp-3.12.13-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f0f8f6a85a0006ae2709aa4ce05749ba2cdcb4b43d6c21a16c8517c16593aabe"}, + {file = "aiohttp-3.12.13-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:1582745eb63df267c92d8b61ca655a0ce62105ef62542c00a74590f306be8cb5"}, + {file = "aiohttp-3.12.13-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d59227776ee2aa64226f7e086638baa645f4b044f2947dbf85c76ab11dcba073"}, + {file = "aiohttp-3.12.13-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:06b07c418bde1c8e737d8fa67741072bd3f5b0fb66cf8c0655172188c17e5fa6"}, + {file = "aiohttp-3.12.13-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:9445c1842680efac0f81d272fd8db7163acfcc2b1436e3f420f4c9a9c5a50795"}, + {file = "aiohttp-3.12.13-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:09c4767af0b0b98c724f5d47f2bf33395c8986995b0a9dab0575ca81a554a8c0"}, + {file = "aiohttp-3.12.13-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:f3854fbde7a465318ad8d3fc5bef8f059e6d0a87e71a0d3360bb56c0bf87b18a"}, + {file = "aiohttp-3.12.13-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:2332b4c361c05ecd381edb99e2a33733f3db906739a83a483974b3df70a51b40"}, + {file = "aiohttp-3.12.13-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:1561db63fa1b658cd94325d303933553ea7d89ae09ff21cc3bcd41b8521fbbb6"}, + {file = "aiohttp-3.12.13-cp310-cp310-win32.whl", hash = "sha256:a0be857f0b35177ba09d7c472825d1b711d11c6d0e8a2052804e3b93166de1ad"}, + {file = "aiohttp-3.12.13-cp310-cp310-win_amd64.whl", hash = "sha256:fcc30ad4fb5cb41a33953292d45f54ef4066746d625992aeac33b8c681173178"}, + {file = "aiohttp-3.12.13-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:7c229b1437aa2576b99384e4be668af1db84b31a45305d02f61f5497cfa6f60c"}, + {file = "aiohttp-3.12.13-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:04076d8c63471e51e3689c93940775dc3d12d855c0c80d18ac5a1c68f0904358"}, + {file = "aiohttp-3.12.13-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:55683615813ce3601640cfaa1041174dc956d28ba0511c8cbd75273eb0587014"}, + {file = "aiohttp-3.12.13-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:921bc91e602d7506d37643e77819cb0b840d4ebb5f8d6408423af3d3bf79a7b7"}, + {file = "aiohttp-3.12.13-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:e72d17fe0974ddeae8ed86db297e23dba39c7ac36d84acdbb53df2e18505a013"}, + {file = "aiohttp-3.12.13-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0653d15587909a52e024a261943cf1c5bdc69acb71f411b0dd5966d065a51a47"}, + {file = "aiohttp-3.12.13-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a77b48997c66722c65e157c06c74332cdf9c7ad00494b85ec43f324e5c5a9b9a"}, + {file = "aiohttp-3.12.13-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d6946bae55fd36cfb8e4092c921075cde029c71c7cb571d72f1079d1e4e013bc"}, + {file = "aiohttp-3.12.13-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4f95db8c8b219bcf294a53742c7bda49b80ceb9d577c8e7aa075612b7f39ffb7"}, + {file = "aiohttp-3.12.13-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:03d5eb3cfb4949ab4c74822fb3326cd9655c2b9fe22e4257e2100d44215b2e2b"}, + {file = "aiohttp-3.12.13-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:6383dd0ffa15515283c26cbf41ac8e6705aab54b4cbb77bdb8935a713a89bee9"}, + {file = "aiohttp-3.12.13-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:6548a411bc8219b45ba2577716493aa63b12803d1e5dc70508c539d0db8dbf5a"}, + {file = "aiohttp-3.12.13-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = 
"sha256:81b0fcbfe59a4ca41dc8f635c2a4a71e63f75168cc91026c61be665945739e2d"}, + {file = "aiohttp-3.12.13-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:6a83797a0174e7995e5edce9dcecc517c642eb43bc3cba296d4512edf346eee2"}, + {file = "aiohttp-3.12.13-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a5734d8469a5633a4e9ffdf9983ff7cdb512524645c7a3d4bc8a3de45b935ac3"}, + {file = "aiohttp-3.12.13-cp311-cp311-win32.whl", hash = "sha256:fef8d50dfa482925bb6b4c208b40d8e9fa54cecba923dc65b825a72eed9a5dbd"}, + {file = "aiohttp-3.12.13-cp311-cp311-win_amd64.whl", hash = "sha256:9a27da9c3b5ed9d04c36ad2df65b38a96a37e9cfba6f1381b842d05d98e6afe9"}, + {file = "aiohttp-3.12.13-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0aa580cf80558557285b49452151b9c69f2fa3ad94c5c9e76e684719a8791b73"}, + {file = "aiohttp-3.12.13-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b103a7e414b57e6939cc4dece8e282cfb22043efd0c7298044f6594cf83ab347"}, + {file = "aiohttp-3.12.13-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:78f64e748e9e741d2eccff9597d09fb3cd962210e5b5716047cbb646dc8fe06f"}, + {file = "aiohttp-3.12.13-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:29c955989bf4c696d2ededc6b0ccb85a73623ae6e112439398935362bacfaaf6"}, + {file = "aiohttp-3.12.13-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:d640191016763fab76072c87d8854a19e8e65d7a6fcfcbf017926bdbbb30a7e5"}, + {file = "aiohttp-3.12.13-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4dc507481266b410dede95dd9f26c8d6f5a14315372cc48a6e43eac652237d9b"}, + {file = "aiohttp-3.12.13-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8a94daa873465d518db073bd95d75f14302e0208a08e8c942b2f3f1c07288a75"}, + {file = "aiohttp-3.12.13-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:177f52420cde4ce0bb9425a375d95577fe082cb5721ecb61da3049b55189e4e6"}, + {file = "aiohttp-3.12.13-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0f7df1f620ec40f1a7fbcb99ea17d7326ea6996715e78f71a1c9a021e31b96b8"}, + {file = "aiohttp-3.12.13-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3062d4ad53b36e17796dce1c0d6da0ad27a015c321e663657ba1cc7659cfc710"}, + {file = "aiohttp-3.12.13-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:8605e22d2a86b8e51ffb5253d9045ea73683d92d47c0b1438e11a359bdb94462"}, + {file = "aiohttp-3.12.13-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:54fbbe6beafc2820de71ece2198458a711e224e116efefa01b7969f3e2b3ddae"}, + {file = "aiohttp-3.12.13-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:050bd277dfc3768b606fd4eae79dd58ceda67d8b0b3c565656a89ae34525d15e"}, + {file = "aiohttp-3.12.13-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:2637a60910b58f50f22379b6797466c3aa6ae28a6ab6404e09175ce4955b4e6a"}, + {file = "aiohttp-3.12.13-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e986067357550d1aaa21cfe9897fa19e680110551518a5a7cf44e6c5638cb8b5"}, + {file = "aiohttp-3.12.13-cp312-cp312-win32.whl", hash = "sha256:ac941a80aeea2aaae2875c9500861a3ba356f9ff17b9cb2dbfb5cbf91baaf5bf"}, + {file = "aiohttp-3.12.13-cp312-cp312-win_amd64.whl", hash = "sha256:671f41e6146a749b6c81cb7fd07f5a8356d46febdaaaf07b0e774ff04830461e"}, + {file = "aiohttp-3.12.13-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:d4a18e61f271127465bdb0e8ff36e8f02ac4a32a80d8927aa52371e93cd87938"}, + {file = "aiohttp-3.12.13-cp313-cp313-macosx_10_13_x86_64.whl", 
hash = "sha256:532542cb48691179455fab429cdb0d558b5e5290b033b87478f2aa6af5d20ace"}, + {file = "aiohttp-3.12.13-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d7eea18b52f23c050ae9db5d01f3d264ab08f09e7356d6f68e3f3ac2de9dfabb"}, + {file = "aiohttp-3.12.13-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ad7c8e5c25f2a26842a7c239de3f7b6bfb92304593ef997c04ac49fb703ff4d7"}, + {file = "aiohttp-3.12.13-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:6af355b483e3fe9d7336d84539fef460120c2f6e50e06c658fe2907c69262d6b"}, + {file = "aiohttp-3.12.13-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a95cf9f097498f35c88e3609f55bb47b28a5ef67f6888f4390b3d73e2bac6177"}, + {file = "aiohttp-3.12.13-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b8ed8c38a1c584fe99a475a8f60eefc0b682ea413a84c6ce769bb19a7ff1c5ef"}, + {file = "aiohttp-3.12.13-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a0b9170d5d800126b5bc89d3053a2363406d6e327afb6afaeda2d19ee8bb103"}, + {file = "aiohttp-3.12.13-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:372feeace612ef8eb41f05ae014a92121a512bd5067db8f25101dd88a8db11da"}, + {file = "aiohttp-3.12.13-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a946d3702f7965d81f7af7ea8fb03bb33fe53d311df48a46eeca17e9e0beed2d"}, + {file = "aiohttp-3.12.13-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:a0c4725fae86555bbb1d4082129e21de7264f4ab14baf735278c974785cd2041"}, + {file = "aiohttp-3.12.13-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:9b28ea2f708234f0a5c44eb6c7d9eb63a148ce3252ba0140d050b091b6e842d1"}, + {file = "aiohttp-3.12.13-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:d4f5becd2a5791829f79608c6f3dc745388162376f310eb9c142c985f9441cc1"}, + {file = "aiohttp-3.12.13-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:60f2ce6b944e97649051d5f5cc0f439360690b73909230e107fd45a359d3e911"}, + {file = "aiohttp-3.12.13-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:69fc1909857401b67bf599c793f2183fbc4804717388b0b888f27f9929aa41f3"}, + {file = "aiohttp-3.12.13-cp313-cp313-win32.whl", hash = "sha256:7d7e68787a2046b0e44ba5587aa723ce05d711e3a3665b6b7545328ac8e3c0dd"}, + {file = "aiohttp-3.12.13-cp313-cp313-win_amd64.whl", hash = "sha256:5a178390ca90419bfd41419a809688c368e63c86bd725e1186dd97f6b89c2706"}, + {file = "aiohttp-3.12.13-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:36f6c973e003dc9b0bb4e8492a643641ea8ef0e97ff7aaa5c0f53d68839357b4"}, + {file = "aiohttp-3.12.13-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6cbfc73179bd67c229eb171e2e3745d2afd5c711ccd1e40a68b90427f282eab1"}, + {file = "aiohttp-3.12.13-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1e8b27b2d414f7e3205aa23bb4a692e935ef877e3a71f40d1884f6e04fd7fa74"}, + {file = "aiohttp-3.12.13-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eabded0c2b2ef56243289112c48556c395d70150ce4220d9008e6b4b3dd15690"}, + {file = "aiohttp-3.12.13-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:003038e83f1a3ff97409999995ec02fe3008a1d675478949643281141f54751d"}, + {file = "aiohttp-3.12.13-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1b6f46613031dbc92bdcaad9c4c22c7209236ec501f9c0c5f5f0b6a689bf50f3"}, + {file = "aiohttp-3.12.13-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:c332c6bb04650d59fb94ed96491f43812549a3ba6e7a16a218e612f99f04145e"}, + {file = "aiohttp-3.12.13-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3fea41a2c931fb582cb15dc86a3037329e7b941df52b487a9f8b5aa960153cbd"}, + {file = "aiohttp-3.12.13-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:846104f45d18fb390efd9b422b27d8f3cf8853f1218c537f36e71a385758c896"}, + {file = "aiohttp-3.12.13-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:5d6c85ac7dd350f8da2520bac8205ce99df4435b399fa7f4dc4a70407073e390"}, + {file = "aiohttp-3.12.13-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:5a1ecce0ed281bec7da8550da052a6b89552db14d0a0a45554156f085a912f48"}, + {file = "aiohttp-3.12.13-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:5304d74867028cca8f64f1cc1215eb365388033c5a691ea7aa6b0dc47412f495"}, + {file = "aiohttp-3.12.13-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:64d1f24ee95a2d1e094a4cd7a9b7d34d08db1bbcb8aa9fb717046b0a884ac294"}, + {file = "aiohttp-3.12.13-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:119c79922a7001ca6a9e253228eb39b793ea994fd2eccb79481c64b5f9d2a055"}, + {file = "aiohttp-3.12.13-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:bb18f00396d22e2f10cd8825d671d9f9a3ba968d708a559c02a627536b36d91c"}, + {file = "aiohttp-3.12.13-cp39-cp39-win32.whl", hash = "sha256:0022de47ef63fd06b065d430ac79c6b0bd24cdae7feaf0e8c6bac23b805a23a8"}, + {file = "aiohttp-3.12.13-cp39-cp39-win_amd64.whl", hash = "sha256:29e08111ccf81b2734ae03f1ad1cb03b9615e7d8f616764f22f71209c094f122"}, + {file = "aiohttp-3.12.13.tar.gz", hash = "sha256:47e2da578528264a12e4e3dd8dd72a7289e5f812758fe086473fab037a10fcce"}, +] + +[package.dependencies] +aiohappyeyeballs = ">=2.5.0" +aiosignal = ">=1.1.2" +async-timeout = {version = ">=4.0,<6.0", markers = "python_version < \"3.11\""} +attrs = ">=17.3.0" +frozenlist = ">=1.1.1" +multidict = ">=4.5,<7.0" +propcache = ">=0.2.0" +yarl = ">=1.17.0,<2.0" + +[package.extras] +speedups = ["Brotli ; platform_python_implementation == \"CPython\"", "aiodns (>=3.3.0)", "brotlicffi ; platform_python_implementation != \"CPython\""] + +[[package]] +name = "aiosignal" +version = "1.3.1" +description = "aiosignal: a list of registered asynchronous callbacks" +optional = false +python-versions = ">=3.7" +groups = ["main"] +markers = "python_version < \"3.10\"" +files = [ + {file = "aiosignal-1.3.1-py3-none-any.whl", hash = "sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17"}, + {file = "aiosignal-1.3.1.tar.gz", hash = "sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc"}, +] + +[package.dependencies] +frozenlist = ">=1.1.0" + +[[package]] +name = "aiosignal" +version = "1.3.2" +description = "aiosignal: a list of registered asynchronous callbacks" +optional = false +python-versions = ">=3.9" +groups = ["main"] +markers = "python_version >= \"3.10\"" +files = [ + {file = "aiosignal-1.3.2-py2.py3-none-any.whl", hash = "sha256:45cde58e409a301715980c2b01d0c28bdde3770d8290b5eb2173759d9acb31a5"}, + {file = "aiosignal-1.3.2.tar.gz", hash = "sha256:a8c255c66fafb1e499c9351d0bf32ff2d8a0321595ebac3b93713656d2436f54"}, +] + +[package.dependencies] +frozenlist = ">=1.1.0" + +[[package]] +name = "aliyun-python-sdk-core" +version = "2.16.0" +description = "The core module of Aliyun Python SDK." 
+optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "aliyun-python-sdk-core-2.16.0.tar.gz", hash = "sha256:651caad597eb39d4fad6cf85133dffe92837d53bdf62db9d8f37dab6508bb8f9"}, +] + +[package.dependencies] +cryptography = ">=3.0.0" +jmespath = ">=0.9.3,<1.0.0" + +[[package]] +name = "aliyun-python-sdk-kms" +version = "2.16.5" +description = "The kms module of Aliyun Python sdk." +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "aliyun-python-sdk-kms-2.16.5.tar.gz", hash = "sha256:f328a8a19d83ecbb965ffce0ec1e9930755216d104638cd95ecd362753b813b3"}, + {file = "aliyun_python_sdk_kms-2.16.5-py2.py3-none-any.whl", hash = "sha256:24b6cdc4fd161d2942619479c8d050c63ea9cd22b044fe33b60bbb60153786f0"}, +] + +[package.dependencies] +aliyun-python-sdk-core = ">=2.11.5" + +[[package]] +name = "annotated-types" +version = "0.7.0" +description = "Reusable constraint types to use with typing.Annotated" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, + {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""} + +[[package]] +name = "anyio" +version = "4.5.2" +description = "High level compatibility layer for multiple asynchronous event loop implementations" +optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "python_version < \"3.10\"" +files = [ + {file = "anyio-4.5.2-py3-none-any.whl", hash = "sha256:c011ee36bc1e8ba40e5a81cb9df91925c218fe9b778554e0b56a21e1b5d4716f"}, + {file = "anyio-4.5.2.tar.gz", hash = "sha256:23009af4ed04ce05991845451e11ef02fc7c5ed29179ac9a420e5ad0ac7ddc5b"}, +] + +[package.dependencies] +exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""} +idna = ">=2.8" +sniffio = ">=1.1" +typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""} + +[package.extras] +doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] +test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "truststore (>=0.9.1) ; python_version >= \"3.10\"", "uvloop (>=0.21.0b1) ; platform_python_implementation == \"CPython\" and platform_system != \"Windows\""] +trio = ["trio (>=0.26.1)"] + +[[package]] +name = "anyio" +version = "4.9.0" +description = "High level compatibility layer for multiple asynchronous event loop implementations" +optional = false +python-versions = ">=3.9" +groups = ["main"] +markers = "python_version >= \"3.10\"" +files = [ + {file = "anyio-4.9.0-py3-none-any.whl", hash = "sha256:9f76d541cad6e36af7beb62e978876f3b41e3e04f2c1fbf0884604c0a9c4d93c"}, + {file = "anyio-4.9.0.tar.gz", hash = "sha256:673c0c244e15788651a4ff38710fea9675823028a6f08a5eda409e0c9840a028"}, +] + +[package.dependencies] +exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""} +idna = ">=2.8" +sniffio = ">=1.1" +typing_extensions = {version = ">=4.5", markers = "python_version < \"3.13\""} + +[package.extras] +doc = ["Sphinx (>=8.2,<9.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx_rtd_theme"] +test = ["anyio[trio]", "blockbuster (>=1.5.23)", "coverage[toml] (>=7)", "exceptiongroup 
(>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "trustme", "truststore (>=0.9.1) ; python_version >= \"3.10\"", "uvloop (>=0.21) ; platform_python_implementation == \"CPython\" and platform_system != \"Windows\" and python_version < \"3.14\""] +trio = ["trio (>=0.26.1)"] + +[[package]] +name = "argon2-cffi" +version = "25.1.0" +description = "Argon2 for Python" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "argon2_cffi-25.1.0-py3-none-any.whl", hash = "sha256:fdc8b074db390fccb6eb4a3604ae7231f219aa669a2652e0f20e16ba513d5741"}, + {file = "argon2_cffi-25.1.0.tar.gz", hash = "sha256:694ae5cc8a42f4c4e2bf2ca0e64e51e23a040c6a517a85074683d3959e1346c1"}, +] + +[package.dependencies] +argon2-cffi-bindings = "*" + +[[package]] +name = "argon2-cffi-bindings" +version = "21.2.0" +description = "Low-level CFFI bindings for Argon2" +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "argon2-cffi-bindings-21.2.0.tar.gz", hash = "sha256:bb89ceffa6c791807d1305ceb77dbfacc5aa499891d2c55661c6459651fc39e3"}, + {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:ccb949252cb2ab3a08c02024acb77cfb179492d5701c7cbdbfd776124d4d2367"}, + {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9524464572e12979364b7d600abf96181d3541da11e23ddf565a32e70bd4dc0d"}, + {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b746dba803a79238e925d9046a63aa26bf86ab2a2fe74ce6b009a1c3f5c8f2ae"}, + {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:58ed19212051f49a523abb1dbe954337dc82d947fb6e5a0da60f7c8471a8476c"}, + {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:bd46088725ef7f58b5a1ef7ca06647ebaf0eb4baff7d1d0d177c6cc8744abd86"}, + {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-musllinux_1_1_i686.whl", hash = "sha256:8cd69c07dd875537a824deec19f978e0f2078fdda07fd5c42ac29668dda5f40f"}, + {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:f1152ac548bd5b8bcecfb0b0371f082037e47128653df2e8ba6e914d384f3c3e"}, + {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-win32.whl", hash = "sha256:603ca0aba86b1349b147cab91ae970c63118a0f30444d4bc80355937c950c082"}, + {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-win_amd64.whl", hash = "sha256:b2ef1c30440dbbcba7a5dc3e319408b59676e2e039e2ae11a8775ecf482b192f"}, + {file = "argon2_cffi_bindings-21.2.0-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:e415e3f62c8d124ee16018e491a009937f8cf7ebf5eb430ffc5de21b900dad93"}, + {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:3e385d1c39c520c08b53d63300c3ecc28622f076f4c2b0e6d7e796e9f6502194"}, + {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2c3e3cc67fdb7d82c4718f19b4e7a87123caf8a93fde7e23cf66ac0337d3cb3f"}, + {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6a22ad9800121b71099d0fb0a65323810a15f2e292f2ba450810a7316e128ee5"}, + {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f9f8b450ed0547e3d473fdc8612083fd08dd2120d6ac8f73828df9b7d45bb351"}, + {file = 
"argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:93f9bf70084f97245ba10ee36575f0c3f1e7d7724d67d8e5b08e61787c320ed7"}, + {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:3b9ef65804859d335dc6b31582cad2c5166f0c3e7975f324d9ffaa34ee7e6583"}, + {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4966ef5848d820776f5f562a7d45fdd70c2f330c961d0d745b784034bd9f48d"}, + {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:20ef543a89dee4db46a1a6e206cd015360e5a75822f76df533845c3cbaf72670"}, + {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ed2937d286e2ad0cc79a7087d3c272832865f779430e0cc2b4f3718d3159b0cb"}, + {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:5e00316dabdaea0b2dd82d141cc66889ced0cdcbfa599e8b471cf22c620c329a"}, +] + +[package.dependencies] +cffi = ">=1.0.1" + +[package.extras] +dev = ["cogapp", "pre-commit", "pytest", "wheel"] +tests = ["pytest"] + +[[package]] +name = "asgiref" +version = "3.8.1" +description = "ASGI specs, helper code, and adapters" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "asgiref-3.8.1-py3-none-any.whl", hash = "sha256:3e1e3ecc849832fe52ccf2cb6686b7a55f82bb1d6aee72a58826471390335e47"}, + {file = "asgiref-3.8.1.tar.gz", hash = "sha256:c343bd80a0bec947a9860adb4c432ffa7db769836c64238fc34bdc3fec84d590"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4", markers = "python_version < \"3.11\""} + +[package.extras] +tests = ["mypy (>=0.800)", "pytest", "pytest-asyncio"] + +[[package]] +name = "async-timeout" +version = "5.0.1" +description = "Timeout context manager for asyncio programs" +optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "python_full_version <= \"3.11.2\"" +files = [ + {file = "async_timeout-5.0.1-py3-none-any.whl", hash = "sha256:39e3809566ff85354557ec2398b55e096c8364bacac9405a7a1fa429e77fe76c"}, + {file = "async_timeout-5.0.1.tar.gz", hash = "sha256:d9321a7a3d5a6a5e187e824d2fa0793ce379a202935782d555d6e9d2735677d3"}, +] + +[[package]] +name = "attrs" +version = "25.3.0" +description = "Classes Without Boilerplate" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "attrs-25.3.0-py3-none-any.whl", hash = "sha256:427318ce031701fea540783410126f03899a97ffc6f61596ad581ac2e40e3bc3"}, + {file = "attrs-25.3.0.tar.gz", hash = "sha256:75d7cefc7fb576747b2c81b4442d4d4a1ce0900973527c011d1030fd3bf4af1b"}, +] + +[package.extras] +benchmark = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] +cov = ["cloudpickle ; platform_python_implementation == \"CPython\"", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] +dev = ["cloudpickle ; platform_python_implementation == \"CPython\"", 
"hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pre-commit-uv", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] +docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier"] +tests = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] +tests-mypy = ["mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\""] + +[[package]] +name = "backoff" +version = "2.2.1" +description = "Function decoration for backoff and retry" +optional = false +python-versions = ">=3.7,<4.0" +groups = ["main"] +files = [ + {file = "backoff-2.2.1-py3-none-any.whl", hash = "sha256:63579f9a0628e06278f7e47b7d7d5b6ce20dc65c5e96a6f3ca99a6adca0396e8"}, + {file = "backoff-2.2.1.tar.gz", hash = "sha256:03f829f5bb1923180821643f8753b0502c3b682293992485b0eef2807afa5cba"}, +] + +[[package]] +name = "backports-datetime-fromisoformat" +version = "2.0.3" +description = "Backport of Python 3.11's datetime.fromisoformat" +optional = false +python-versions = ">3" +groups = ["main"] +markers = "python_version == \"3.10\"" +files = [ + {file = "backports_datetime_fromisoformat-2.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5f681f638f10588fa3c101ee9ae2b63d3734713202ddfcfb6ec6cea0778a29d4"}, + {file = "backports_datetime_fromisoformat-2.0.3-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:cd681460e9142f1249408e5aee6d178c6d89b49e06d44913c8fdfb6defda8d1c"}, + {file = "backports_datetime_fromisoformat-2.0.3-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:ee68bc8735ae5058695b76d3bb2aee1d137c052a11c8303f1e966aa23b72b65b"}, + {file = "backports_datetime_fromisoformat-2.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8273fe7932db65d952a43e238318966eab9e49e8dd546550a41df12175cc2be4"}, + {file = "backports_datetime_fromisoformat-2.0.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:39d57ea50aa5a524bb239688adc1d1d824c31b6094ebd39aa164d6cadb85de22"}, + {file = "backports_datetime_fromisoformat-2.0.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:ac6272f87693e78209dc72e84cf9ab58052027733cd0721c55356d3c881791cf"}, + {file = "backports_datetime_fromisoformat-2.0.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:44c497a71f80cd2bcfc26faae8857cf8e79388e3d5fbf79d2354b8c360547d58"}, + {file = "backports_datetime_fromisoformat-2.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:6335a4c9e8af329cb1ded5ab41a666e1448116161905a94e054f205aa6d263bc"}, + {file = "backports_datetime_fromisoformat-2.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e2e4b66e017253cdbe5a1de49e0eecff3f66cd72bcb1229d7db6e6b1832c0443"}, + {file = "backports_datetime_fromisoformat-2.0.3-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:43e2d648e150777e13bbc2549cc960373e37bf65bd8a5d2e0cef40e16e5d8dd0"}, + {file = "backports_datetime_fromisoformat-2.0.3-cp311-cp311-macosx_11_0_x86_64.whl", hash = 
"sha256:4ce6326fd86d5bae37813c7bf1543bae9e4c215ec6f5afe4c518be2635e2e005"}, + {file = "backports_datetime_fromisoformat-2.0.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7c8fac333bf860208fd522a5394369ee3c790d0aa4311f515fcc4b6c5ef8d75"}, + {file = "backports_datetime_fromisoformat-2.0.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24a4da5ab3aa0cc293dc0662a0c6d1da1a011dc1edcbc3122a288cfed13a0b45"}, + {file = "backports_datetime_fromisoformat-2.0.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:58ea11e3bf912bd0a36b0519eae2c5b560b3cb972ea756e66b73fb9be460af01"}, + {file = "backports_datetime_fromisoformat-2.0.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:8a375c7dbee4734318714a799b6c697223e4bbb57232af37fbfff88fb48a14c6"}, + {file = "backports_datetime_fromisoformat-2.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:ac677b1664c4585c2e014739f6678137c8336815406052349c85898206ec7061"}, + {file = "backports_datetime_fromisoformat-2.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:66ce47ee1ba91e146149cf40565c3d750ea1be94faf660ca733d8601e0848147"}, + {file = "backports_datetime_fromisoformat-2.0.3-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:8b7e069910a66b3bba61df35b5f879e5253ff0821a70375b9daf06444d046fa4"}, + {file = "backports_datetime_fromisoformat-2.0.3-cp312-cp312-macosx_11_0_x86_64.whl", hash = "sha256:a3b5d1d04a9e0f7b15aa1e647c750631a873b298cdd1255687bb68779fe8eb35"}, + {file = "backports_datetime_fromisoformat-2.0.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec1b95986430e789c076610aea704db20874f0781b8624f648ca9fb6ef67c6e1"}, + {file = "backports_datetime_fromisoformat-2.0.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffe5f793db59e2f1d45ec35a1cf51404fdd69df9f6952a0c87c3060af4c00e32"}, + {file = "backports_datetime_fromisoformat-2.0.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:620e8e73bd2595dfff1b4d256a12b67fce90ece3de87b38e1dde46b910f46f4d"}, + {file = "backports_datetime_fromisoformat-2.0.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:4cf9c0a985d68476c1cabd6385c691201dda2337d7453fb4da9679ce9f23f4e7"}, + {file = "backports_datetime_fromisoformat-2.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:d144868a73002e6e2e6fef72333e7b0129cecdd121aa8f1edba7107fd067255d"}, + {file = "backports_datetime_fromisoformat-2.0.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e81b26497a17c29595bc7df20bc6a872ceea5f8c9d6537283945d4b6396aec10"}, + {file = "backports_datetime_fromisoformat-2.0.3-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:5ba00ead8d9d82fd6123eb4891c566d30a293454e54e32ff7ead7644f5f7e575"}, + {file = "backports_datetime_fromisoformat-2.0.3-cp38-cp38-macosx_11_0_x86_64.whl", hash = "sha256:24d574cb4072e1640b00864e94c4c89858033936ece3fc0e1c6f7179f120d0a8"}, + {file = "backports_datetime_fromisoformat-2.0.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9735695a66aad654500b0193525e590c693ab3368478ce07b34b443a1ea5e824"}, + {file = "backports_datetime_fromisoformat-2.0.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63d39709e17eb72685d052ac82acf0763e047f57c86af1b791505b1fec96915d"}, + {file = "backports_datetime_fromisoformat-2.0.3-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:1ea2cc84224937d6b9b4c07f5cb7c667f2bde28c255645ba27f8a675a7af8234"}, + {file = "backports_datetime_fromisoformat-2.0.3-cp38-cp38-musllinux_1_2_x86_64.whl", hash = 
"sha256:4024e6d35a9fdc1b3fd6ac7a673bd16cb176c7e0b952af6428b7129a70f72cce"}, + {file = "backports_datetime_fromisoformat-2.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:5e2dcc94dc9c9ab8704409d86fcb5236316e9dcef6feed8162287634e3568f4c"}, + {file = "backports_datetime_fromisoformat-2.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fa2de871801d824c255fac7e5e7e50f2be6c9c376fd9268b40c54b5e9da91f42"}, + {file = "backports_datetime_fromisoformat-2.0.3-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:1314d4923c1509aa9696712a7bc0c7160d3b7acf72adafbbe6c558d523f5d491"}, + {file = "backports_datetime_fromisoformat-2.0.3-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:b750ecba3a8815ad8bc48311552f3f8ab99dd2326d29df7ff670d9c49321f48f"}, + {file = "backports_datetime_fromisoformat-2.0.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b2d5117dce805d8a2f78baeddc8c6127281fa0a5e2c40c6dd992ba6b2b367876"}, + {file = "backports_datetime_fromisoformat-2.0.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fb35f607bd1cbe37b896379d5f5ed4dc298b536f4b959cb63180e05cacc0539d"}, + {file = "backports_datetime_fromisoformat-2.0.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:61c74710900602637d2d145dda9720c94e303380803bf68811b2a151deec75c2"}, + {file = "backports_datetime_fromisoformat-2.0.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:ece59af54ebf67ecbfbbf3ca9066f5687879e36527ad69d8b6e3ac565d565a62"}, + {file = "backports_datetime_fromisoformat-2.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:d0a7c5f875068efe106f62233bc712d50db4d07c13c7db570175c7857a7b5dbd"}, + {file = "backports_datetime_fromisoformat-2.0.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:90e202e72a3d5aae673fcc8c9a4267d56b2f532beeb9173361293625fe4d2039"}, + {file = "backports_datetime_fromisoformat-2.0.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2df98ef1b76f5a58bb493dda552259ba60c3a37557d848e039524203951c9f06"}, + {file = "backports_datetime_fromisoformat-2.0.3-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7100adcda5e818b5a894ad0626e38118bb896a347f40ebed8981155675b9ba7b"}, + {file = "backports_datetime_fromisoformat-2.0.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e410383f5d6a449a529d074e88af8bc80020bb42b402265f9c02c8358c11da5"}, + {file = "backports_datetime_fromisoformat-2.0.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f2797593760da6bcc32c4a13fa825af183cd4bfd333c60b3dbf84711afca26ef"}, + {file = "backports_datetime_fromisoformat-2.0.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:35a144fd681a0bea1013ccc4cd3fd4dc758ea17ee23dca019c02b82ec46fc0c4"}, + {file = "backports_datetime_fromisoformat-2.0.3.tar.gz", hash = "sha256:b58edc8f517b66b397abc250ecc737969486703a66eb97e01e6d51291b1a139d"}, +] + +[[package]] +name = "bcrypt" +version = "4.3.0" +description = "Modern password hashing for your software and your servers" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "bcrypt-4.3.0-cp313-cp313t-macosx_10_12_universal2.whl", hash = "sha256:f01e060f14b6b57bbb72fc5b4a83ac21c443c9a2ee708e04a10e9192f90a6281"}, + {file = "bcrypt-4.3.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5eeac541cefd0bb887a371ef73c62c3cd78535e4887b310626036a7c0a817bb"}, + {file = 
"bcrypt-4.3.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:59e1aa0e2cd871b08ca146ed08445038f42ff75968c7ae50d2fdd7860ade2180"}, + {file = "bcrypt-4.3.0-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:0042b2e342e9ae3d2ed22727c1262f76cc4f345683b5c1715f0250cf4277294f"}, + {file = "bcrypt-4.3.0-cp313-cp313t-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74a8d21a09f5e025a9a23e7c0fd2c7fe8e7503e4d356c0a2c1486ba010619f09"}, + {file = "bcrypt-4.3.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:0142b2cb84a009f8452c8c5a33ace5e3dfec4159e7735f5afe9a4d50a8ea722d"}, + {file = "bcrypt-4.3.0-cp313-cp313t-manylinux_2_34_aarch64.whl", hash = "sha256:12fa6ce40cde3f0b899729dbd7d5e8811cb892d31b6f7d0334a1f37748b789fd"}, + {file = "bcrypt-4.3.0-cp313-cp313t-manylinux_2_34_x86_64.whl", hash = "sha256:5bd3cca1f2aa5dbcf39e2aa13dd094ea181f48959e1071265de49cc2b82525af"}, + {file = "bcrypt-4.3.0-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:335a420cfd63fc5bc27308e929bee231c15c85cc4c496610ffb17923abf7f231"}, + {file = "bcrypt-4.3.0-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:0e30e5e67aed0187a1764911af023043b4542e70a7461ad20e837e94d23e1d6c"}, + {file = "bcrypt-4.3.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:3b8d62290ebefd49ee0b3ce7500f5dbdcf13b81402c05f6dafab9a1e1b27212f"}, + {file = "bcrypt-4.3.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:2ef6630e0ec01376f59a006dc72918b1bf436c3b571b80fa1968d775fa02fe7d"}, + {file = "bcrypt-4.3.0-cp313-cp313t-win32.whl", hash = "sha256:7a4be4cbf241afee43f1c3969b9103a41b40bcb3a3f467ab19f891d9bc4642e4"}, + {file = "bcrypt-4.3.0-cp313-cp313t-win_amd64.whl", hash = "sha256:5c1949bf259a388863ced887c7861da1df681cb2388645766c89fdfd9004c669"}, + {file = "bcrypt-4.3.0-cp38-abi3-macosx_10_12_universal2.whl", hash = "sha256:f81b0ed2639568bf14749112298f9e4e2b28853dab50a8b357e31798686a036d"}, + {file = "bcrypt-4.3.0-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:864f8f19adbe13b7de11ba15d85d4a428c7e2f344bac110f667676a0ff84924b"}, + {file = "bcrypt-4.3.0-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3e36506d001e93bffe59754397572f21bb5dc7c83f54454c990c74a468cd589e"}, + {file = "bcrypt-4.3.0-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:842d08d75d9fe9fb94b18b071090220697f9f184d4547179b60734846461ed59"}, + {file = "bcrypt-4.3.0-cp38-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:7c03296b85cb87db865d91da79bf63d5609284fc0cab9472fdd8367bbd830753"}, + {file = "bcrypt-4.3.0-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:62f26585e8b219cdc909b6a0069efc5e4267e25d4a3770a364ac58024f62a761"}, + {file = "bcrypt-4.3.0-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:beeefe437218a65322fbd0069eb437e7c98137e08f22c4660ac2dc795c31f8bb"}, + {file = "bcrypt-4.3.0-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:97eea7408db3a5bcce4a55d13245ab3fa566e23b4c67cd227062bb49e26c585d"}, + {file = "bcrypt-4.3.0-cp38-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:191354ebfe305e84f344c5964c7cd5f924a3bfc5d405c75ad07f232b6dffb49f"}, + {file = "bcrypt-4.3.0-cp38-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:41261d64150858eeb5ff43c753c4b216991e0ae16614a308a15d909503617732"}, + {file = "bcrypt-4.3.0-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:33752b1ba962ee793fa2b6321404bf20011fe45b9afd2a842139de3011898fef"}, + {file = "bcrypt-4.3.0-cp38-abi3-musllinux_1_2_x86_64.whl", hash = 
"sha256:50e6e80a4bfd23a25f5c05b90167c19030cf9f87930f7cb2eacb99f45d1c3304"}, + {file = "bcrypt-4.3.0-cp38-abi3-win32.whl", hash = "sha256:67a561c4d9fb9465ec866177e7aebcad08fe23aaf6fbd692a6fab69088abfc51"}, + {file = "bcrypt-4.3.0-cp38-abi3-win_amd64.whl", hash = "sha256:584027857bc2843772114717a7490a37f68da563b3620f78a849bcb54dc11e62"}, + {file = "bcrypt-4.3.0-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:0d3efb1157edebfd9128e4e46e2ac1a64e0c1fe46fb023158a407c7892b0f8c3"}, + {file = "bcrypt-4.3.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:08bacc884fd302b611226c01014eca277d48f0a05187666bca23aac0dad6fe24"}, + {file = "bcrypt-4.3.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6746e6fec103fcd509b96bacdfdaa2fbde9a553245dbada284435173a6f1aef"}, + {file = "bcrypt-4.3.0-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:afe327968aaf13fc143a56a3360cb27d4ad0345e34da12c7290f1b00b8fe9a8b"}, + {file = "bcrypt-4.3.0-cp39-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:d9af79d322e735b1fc33404b5765108ae0ff232d4b54666d46730f8ac1a43676"}, + {file = "bcrypt-4.3.0-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f1e3ffa1365e8702dc48c8b360fef8d7afeca482809c5e45e653af82ccd088c1"}, + {file = "bcrypt-4.3.0-cp39-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:3004df1b323d10021fda07a813fd33e0fd57bef0e9a480bb143877f6cba996fe"}, + {file = "bcrypt-4.3.0-cp39-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:531457e5c839d8caea9b589a1bcfe3756b0547d7814e9ce3d437f17da75c32b0"}, + {file = "bcrypt-4.3.0-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:17a854d9a7a476a89dcef6c8bd119ad23e0f82557afbd2c442777a16408e614f"}, + {file = "bcrypt-4.3.0-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:6fb1fd3ab08c0cbc6826a2e0447610c6f09e983a281b919ed721ad32236b8b23"}, + {file = "bcrypt-4.3.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:e965a9c1e9a393b8005031ff52583cedc15b7884fce7deb8b0346388837d6cfe"}, + {file = "bcrypt-4.3.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:79e70b8342a33b52b55d93b3a59223a844962bef479f6a0ea318ebbcadf71505"}, + {file = "bcrypt-4.3.0-cp39-abi3-win32.whl", hash = "sha256:b4d4e57f0a63fd0b358eb765063ff661328f69a04494427265950c71b992a39a"}, + {file = "bcrypt-4.3.0-cp39-abi3-win_amd64.whl", hash = "sha256:e53e074b120f2877a35cc6c736b8eb161377caae8925c17688bd46ba56daaa5b"}, + {file = "bcrypt-4.3.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c950d682f0952bafcceaf709761da0a32a942272fad381081b51096ffa46cea1"}, + {file = "bcrypt-4.3.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:107d53b5c67e0bbc3f03ebf5b030e0403d24dda980f8e244795335ba7b4a027d"}, + {file = "bcrypt-4.3.0-pp310-pypy310_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:b693dbb82b3c27a1604a3dff5bfc5418a7e6a781bb795288141e5f80cf3a3492"}, + {file = "bcrypt-4.3.0-pp310-pypy310_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:b6354d3760fcd31994a14c89659dee887f1351a06e5dac3c1142307172a79f90"}, + {file = "bcrypt-4.3.0-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:a839320bf27d474e52ef8cb16449bb2ce0ba03ca9f44daba6d93fa1d8828e48a"}, + {file = "bcrypt-4.3.0-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:bdc6a24e754a555d7316fa4774e64c6c3997d27ed2d1964d55920c7c227bc4ce"}, + {file = "bcrypt-4.3.0-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:55a935b8e9a1d2def0626c4269db3fcd26728cbff1e84f0341465c31c4ee56d8"}, + {file = 
"bcrypt-4.3.0-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:57967b7a28d855313a963aaea51bf6df89f833db4320da458e5b3c5ab6d4c938"}, + {file = "bcrypt-4.3.0.tar.gz", hash = "sha256:3a3fd2204178b6d2adcf09cb4f6426ffef54762577a7c9b54c159008cb288c18"}, +] + +[package.extras] +tests = ["pytest (>=3.2.1,!=3.3.0)"] +typecheck = ["mypy"] + +[[package]] +name = "blinker" +version = "1.8.2" +description = "Fast, simple object-to-object and broadcast signaling" +optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "python_version < \"3.10\"" +files = [ + {file = "blinker-1.8.2-py3-none-any.whl", hash = "sha256:1779309f71bf239144b9399d06ae925637cf6634cf6bd131104184531bf67c01"}, + {file = "blinker-1.8.2.tar.gz", hash = "sha256:8f77b09d3bf7c795e969e9486f39c2c5e9c39d4ee07424be2bc594ece9642d83"}, +] + +[[package]] +name = "blinker" +version = "1.9.0" +description = "Fast, simple object-to-object and broadcast signaling" +optional = false +python-versions = ">=3.9" +groups = ["main"] +markers = "python_version >= \"3.10\"" +files = [ + {file = "blinker-1.9.0-py3-none-any.whl", hash = "sha256:ba0efaa9080b619ff2f3459d1d500c57bddea4a6b424b60a91141db6fd2f08bc"}, + {file = "blinker-1.9.0.tar.gz", hash = "sha256:b4ce2265a7abece45e7cc896e98dbebe6cead56bcf805a3d23136d145f5445bf"}, +] + +[[package]] +name = "build" +version = "1.2.2.post1" +description = "A simple, correct Python build frontend" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "build-1.2.2.post1-py3-none-any.whl", hash = "sha256:1d61c0887fa860c01971625baae8bdd338e517b836a2f70dd1f7aa3a6b2fc5b5"}, + {file = "build-1.2.2.post1.tar.gz", hash = "sha256:b36993e92ca9375a219c99e606a122ff365a760a2d4bba0caa09bd5278b608b7"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "os_name == \"nt\""} +importlib-metadata = {version = ">=4.6", markers = "python_full_version < \"3.10.2\""} +packaging = ">=19.1" +pyproject_hooks = "*" +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} + +[package.extras] +docs = ["furo (>=2023.08.17)", "sphinx (>=7.0,<8.0)", "sphinx-argparse-cli (>=1.5)", "sphinx-autodoc-typehints (>=1.10)", "sphinx-issues (>=3.0.0)"] +test = ["build[uv,virtualenv]", "filelock (>=3)", "pytest (>=6.2.4)", "pytest-cov (>=2.12)", "pytest-mock (>=2)", "pytest-rerunfailures (>=9.1)", "pytest-xdist (>=1.34)", "setuptools (>=42.0.0) ; python_version < \"3.10\"", "setuptools (>=56.0.0) ; python_version == \"3.10\"", "setuptools (>=56.0.0) ; python_version == \"3.11\"", "setuptools (>=67.8.0) ; python_version >= \"3.12\"", "wheel (>=0.36.0)"] +typing = ["build[uv]", "importlib-metadata (>=5.1)", "mypy (>=1.9.0,<1.10.0)", "tomli", "typing-extensions (>=3.7.4.3)"] +uv = ["uv (>=0.1.18)"] +virtualenv = ["virtualenv (>=20.0.35)"] + +[[package]] +name = "cachetools" +version = "5.3.1" +description = "Extensible memoizing collections and decorators" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "cachetools-5.3.1-py3-none-any.whl", hash = "sha256:95ef631eeaea14ba2e36f06437f36463aac3a096799e876ee55e5cdccb102590"}, + {file = "cachetools-5.3.1.tar.gz", hash = "sha256:dce83f2d9b4e1f732a8cd44af8e8fab2dbe46201467fc98b3ef8f269092bf62b"}, +] + +[[package]] +name = "certifi" +version = "2025.6.15" +description = "Python package for providing Mozilla's CA Bundle." 
+optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "certifi-2025.6.15-py3-none-any.whl", hash = "sha256:2e0c7ce7cb5d8f8634ca55d2ba7e6ec2689a2fd6537d8dec1296a477a4910057"}, + {file = "certifi-2025.6.15.tar.gz", hash = "sha256:d747aa5a8b9bbbb1bb8c22bb13e22bd1f18e9796defa16bab421f7f7a317323b"}, +] + +[[package]] +name = "cffi" +version = "1.17.1" +description = "Foreign Function Interface for Python calling C code." +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14"}, + {file = "cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be"}, + {file = "cffi-1.17.1-cp310-cp310-win32.whl", hash = "sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c"}, + {file = "cffi-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15"}, + {file = "cffi-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401"}, + {file = "cffi-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d"}, + {file = 
"cffi-1.17.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b"}, + {file = "cffi-1.17.1-cp311-cp311-win32.whl", hash = "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655"}, + {file = "cffi-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0"}, + {file = "cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4"}, + {file = "cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93"}, + {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3"}, + {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8"}, + {file = "cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65"}, + {file = "cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903"}, + {file = "cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e"}, + {file = "cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd"}, + {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed"}, + {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9"}, + {file = "cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d"}, + {file = "cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a"}, + {file = "cffi-1.17.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1"}, + {file = "cffi-1.17.1-cp38-cp38-win32.whl", hash = "sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8"}, + {file = "cffi-1.17.1-cp38-cp38-win_amd64.whl", hash = "sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1"}, + {file = "cffi-1.17.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16"}, + {file = "cffi-1.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e"}, + {file = "cffi-1.17.1-cp39-cp39-win32.whl", hash = 
"sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7"}, + {file = "cffi-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662"}, + {file = "cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824"}, +] + +[package.dependencies] +pycparser = "*" + +[[package]] +name = "charset-normalizer" +version = "3.4.2" +description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "charset_normalizer-3.4.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7c48ed483eb946e6c04ccbe02c6b4d1d48e51944b6db70f697e089c193404941"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b2d318c11350e10662026ad0eb71bb51c7812fc8590825304ae0bdd4ac283acd"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9cbfacf36cb0ec2897ce0ebc5d08ca44213af24265bd56eca54bee7923c48fd6"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18dd2e350387c87dabe711b86f83c9c78af772c748904d372ade190b5c7c9d4d"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8075c35cd58273fee266c58c0c9b670947c19df5fb98e7b66710e04ad4e9ff86"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5bf4545e3b962767e5c06fe1738f951f77d27967cb2caa64c28be7c4563e162c"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:7a6ab32f7210554a96cd9e33abe3ddd86732beeafc7a28e9955cdf22ffadbab0"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:b33de11b92e9f75a2b545d6e9b6f37e398d86c3e9e9653c4864eb7e89c5773ef"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:8755483f3c00d6c9a77f490c17e6ab0c8729e39e6390328e42521ef175380ae6"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:68a328e5f55ec37c57f19ebb1fdc56a248db2e3e9ad769919a58672958e8f366"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:21b2899062867b0e1fde9b724f8aecb1af14f2778d69aacd1a5a1853a597a5db"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-win32.whl", hash = "sha256:e8082b26888e2f8b36a042a58307d5b917ef2b1cacab921ad3323ef91901c71a"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-win_amd64.whl", hash = "sha256:f69a27e45c43520f5487f27627059b64aaf160415589230992cec34c5e18a509"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:be1e352acbe3c78727a16a455126d9ff83ea2dfdcbc83148d2982305a04714c2"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa88ca0b1932e93f2d961bf3addbb2db902198dca337d88c89e1559e066e7645"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d524ba3f1581b35c03cb42beebab4a13e6cdad7b36246bd22541fa585a56cccd"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28a1005facc94196e1fb3e82a3d442a9d9110b8434fc1ded7a24a2983c9888d8"}, + {file = 
"charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fdb20a30fe1175ecabed17cbf7812f7b804b8a315a25f24678bcdf120a90077f"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0f5d9ed7f254402c9e7d35d2f5972c9bbea9040e99cd2861bd77dc68263277c7"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:efd387a49825780ff861998cd959767800d54f8308936b21025326de4b5a42b9"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:f0aa37f3c979cf2546b73e8222bbfa3dc07a641585340179d768068e3455e544"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:e70e990b2137b29dc5564715de1e12701815dacc1d056308e2b17e9095372a82"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:0c8c57f84ccfc871a48a47321cfa49ae1df56cd1d965a09abe84066f6853b9c0"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6b66f92b17849b85cad91259efc341dce9c1af48e2173bf38a85c6329f1033e5"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-win32.whl", hash = "sha256:daac4765328a919a805fa5e2720f3e94767abd632ae410a9062dff5412bae65a"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-win_amd64.whl", hash = "sha256:e53efc7c7cee4c1e70661e2e112ca46a575f90ed9ae3fef200f2a25e954f4b28"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0c29de6a1a95f24b9a1aa7aefd27d2487263f00dfd55a77719b530788f75cff7"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cddf7bd982eaa998934a91f69d182aec997c6c468898efe6679af88283b498d3"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fcbe676a55d7445b22c10967bceaaf0ee69407fbe0ece4d032b6eb8d4565982a"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d41c4d287cfc69060fa91cae9683eacffad989f1a10811995fa309df656ec214"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e594135de17ab3866138f496755f302b72157d115086d100c3f19370839dd3a"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cf713fe9a71ef6fd5adf7a79670135081cd4431c2943864757f0fa3a65b1fafd"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a370b3e078e418187da8c3674eddb9d983ec09445c99a3a263c2011993522981"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a955b438e62efdf7e0b7b52a64dc5c3396e2634baa62471768a64bc2adb73d5c"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:7222ffd5e4de8e57e03ce2cef95a4c43c98fcb72ad86909abdfc2c17d227fc1b"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:bee093bf902e1d8fc0ac143c88902c3dfc8941f7ea1d6a8dd2bcb786d33db03d"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:dedb8adb91d11846ee08bec4c8236c8549ac721c245678282dcb06b221aab59f"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-win32.whl", hash = "sha256:db4c7bf0e07fc3b7d89ac2a5880a6a8062056801b83ff56d8464b70f65482b6c"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-win_amd64.whl", hash 
= "sha256:5a9979887252a82fefd3d3ed2a8e3b937a7a809f65dcb1e068b090e165bbe99e"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:926ca93accd5d36ccdabd803392ddc3e03e6d4cd1cf17deff3b989ab8e9dbcf0"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eba9904b0f38a143592d9fc0e19e2df0fa2e41c3c3745554761c5f6447eedabf"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3fddb7e2c84ac87ac3a947cb4e66d143ca5863ef48e4a5ecb83bd48619e4634e"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98f862da73774290f251b9df8d11161b6cf25b599a66baf087c1ffe340e9bfd1"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c9379d65defcab82d07b2a9dfbfc2e95bc8fe0ebb1b176a3190230a3ef0e07c"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e635b87f01ebc977342e2697d05b56632f5f879a4f15955dfe8cef2448b51691"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:1c95a1e2902a8b722868587c0e1184ad5c55631de5afc0eb96bc4b0d738092c0"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ef8de666d6179b009dce7bcb2ad4c4a779f113f12caf8dc77f0162c29d20490b"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:32fc0341d72e0f73f80acb0a2c94216bd704f4f0bce10aedea38f30502b271ff"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:289200a18fa698949d2b39c671c2cc7a24d44096784e76614899a7ccf2574b7b"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4a476b06fbcf359ad25d34a057b7219281286ae2477cc5ff5e3f70a246971148"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-win32.whl", hash = "sha256:aaeeb6a479c7667fbe1099af9617c83aaca22182d6cf8c53966491a0f1b7ffb7"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-win_amd64.whl", hash = "sha256:aa6af9e7d59f9c12b33ae4e9450619cf2488e2bbe9b44030905877f0b2324980"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1cad5f45b3146325bb38d6855642f6fd609c3f7cad4dbaf75549bf3b904d3184"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b2680962a4848b3c4f155dc2ee64505a9c57186d0d56b43123b17ca3de18f0fa"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:36b31da18b8890a76ec181c3cf44326bf2c48e36d393ca1b72b3f484113ea344"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f4074c5a429281bf056ddd4c5d3b740ebca4d43ffffe2ef4bf4d2d05114299da"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c9e36a97bee9b86ef9a1cf7bb96747eb7a15c2f22bdb5b516434b00f2a599f02"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:1b1bde144d98e446b056ef98e59c256e9294f6b74d7af6846bf5ffdafd687a7d"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:915f3849a011c1f593ab99092f3cecfcb4d65d8feb4a64cf1bf2d22074dc0ec4"}, + {file = 
"charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:fb707f3e15060adf5b7ada797624a6c6e0138e2a26baa089df64c68ee98e040f"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:25a23ea5c7edc53e0f29bae2c44fcb5a1aa10591aae107f2a2b2583a9c5cbc64"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:770cab594ecf99ae64c236bc9ee3439c3f46be49796e265ce0cc8bc17b10294f"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-win32.whl", hash = "sha256:6a0289e4589e8bdfef02a80478f1dfcb14f0ab696b5a00e1f4b8a14a307a3c58"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-win_amd64.whl", hash = "sha256:6fc1f5b51fa4cecaa18f2bd7a003f3dd039dd615cd69a2afd6d3b19aed6775f2"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:76af085e67e56c8816c3ccf256ebd136def2ed9654525348cfa744b6802b69eb"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e45ba65510e2647721e35323d6ef54c7974959f6081b58d4ef5d87c60c84919a"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:046595208aae0120559a67693ecc65dd75d46f7bf687f159127046628178dc45"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75d10d37a47afee94919c4fab4c22b9bc2a8bf7d4f46f87363bcf0573f3ff4f5"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6333b3aa5a12c26b2a4d4e7335a28f1475e0e5e17d69d55141ee3cab736f66d1"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e8323a9b031aa0393768b87f04b4164a40037fb2a3c11ac06a03ffecd3618027"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:24498ba8ed6c2e0b56d4acbf83f2d989720a93b41d712ebd4f4979660db4417b"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:844da2b5728b5ce0e32d863af26f32b5ce61bc4273a9c720a9f3aa9df73b1455"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:65c981bdbd3f57670af8b59777cbfae75364b483fa8a9f420f08094531d54a01"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:3c21d4fca343c805a52c0c78edc01e3477f6dd1ad7c47653241cf2a206d4fc58"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:dc7039885fa1baf9be153a0626e337aa7ec8bf96b0128605fb0d77788ddc1681"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-win32.whl", hash = "sha256:8272b73e1c5603666618805fe821edba66892e2870058c94c53147602eab29c7"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-win_amd64.whl", hash = "sha256:70f7172939fdf8790425ba31915bfbe8335030f05b9913d7ae00a87d4395620a"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:005fa3432484527f9732ebd315da8da8001593e2cf46a3d817669f062c3d9ed4"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e92fca20c46e9f5e1bb485887d074918b13543b1c2a1185e69bb8d17ab6236a7"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:50bf98d5e563b83cc29471fa114366e6806bc06bc7a25fd59641e41445327836"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:721c76e84fe669be19c5791da68232ca2e05ba5185575086e384352e2c309597"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82d8fd25b7f4675d0c47cf95b594d4e7b158aca33b76aa63d07186e13c0e0ab7"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3daeac64d5b371dea99714f08ffc2c208522ec6b06fbc7866a450dd446f5c0f"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:dccab8d5fa1ef9bfba0590ecf4d46df048d18ffe3eec01eeb73a42e0d9e7a8ba"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:aaf27faa992bfee0264dc1f03f4c75e9fcdda66a519db6b957a3f826e285cf12"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:eb30abc20df9ab0814b5a2524f23d75dcf83cde762c161917a2b4b7b55b1e518"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:c72fbbe68c6f32f251bdc08b8611c7b3060612236e960ef848e0a517ddbe76c5"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:982bb1e8b4ffda883b3d0a521e23abcd6fd17418f6d2c4118d257a10199c0ce3"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-win32.whl", hash = "sha256:43e0933a0eff183ee85833f341ec567c0980dae57c464d8a508e1b2ceb336471"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-win_amd64.whl", hash = "sha256:d11b54acf878eef558599658b0ffca78138c8c3655cf4f3a4a673c437e67732e"}, + {file = "charset_normalizer-3.4.2-py3-none-any.whl", hash = "sha256:7f56930ab0abd1c45cd15be65cc741c28b1c9a34876ce8c17a2fa107810c0af0"}, + {file = "charset_normalizer-3.4.2.tar.gz", hash = "sha256:5baececa9ecba31eff645232d59845c07aa030f0c81ee70184a90d35099a0e63"}, +] + +[[package]] +name = "chroma-hnswlib" +version = "0.7.6" +description = "Chromas fork of hnswlib" +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "chroma_hnswlib-0.7.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f35192fbbeadc8c0633f0a69c3d3e9f1a4eab3a46b65458bbcbcabdd9e895c36"}, + {file = "chroma_hnswlib-0.7.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6f007b608c96362b8f0c8b6b2ac94f67f83fcbabd857c378ae82007ec92f4d82"}, + {file = "chroma_hnswlib-0.7.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:456fd88fa0d14e6b385358515aef69fc89b3c2191706fd9aee62087b62aad09c"}, + {file = "chroma_hnswlib-0.7.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5dfaae825499c2beaa3b75a12d7ec713b64226df72a5c4097203e3ed532680da"}, + {file = "chroma_hnswlib-0.7.6-cp310-cp310-win_amd64.whl", hash = "sha256:2487201982241fb1581be26524145092c95902cb09fc2646ccfbc407de3328ec"}, + {file = "chroma_hnswlib-0.7.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:81181d54a2b1e4727369486a631f977ffc53c5533d26e3d366dda243fb0998ca"}, + {file = "chroma_hnswlib-0.7.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4b4ab4e11f1083dd0a11ee4f0e0b183ca9f0f2ed63ededba1935b13ce2b3606f"}, + {file = "chroma_hnswlib-0.7.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:53db45cd9173d95b4b0bdccb4dbff4c54a42b51420599c32267f3abbeb795170"}, + {file = "chroma_hnswlib-0.7.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5c093f07a010b499c00a15bc9376036ee4800d335360570b14f7fe92badcdcf9"}, + {file = "chroma_hnswlib-0.7.6-cp311-cp311-win_amd64.whl", hash = 
"sha256:0540b0ac96e47d0aa39e88ea4714358ae05d64bbe6bf33c52f316c664190a6a3"}, + {file = "chroma_hnswlib-0.7.6-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:e87e9b616c281bfbe748d01705817c71211613c3b063021f7ed5e47173556cb7"}, + {file = "chroma_hnswlib-0.7.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ec5ca25bc7b66d2ecbf14502b5729cde25f70945d22f2aaf523c2d747ea68912"}, + {file = "chroma_hnswlib-0.7.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:305ae491de9d5f3c51e8bd52d84fdf2545a4a2bc7af49765cda286b7bb30b1d4"}, + {file = "chroma_hnswlib-0.7.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:822ede968d25a2c88823ca078a58f92c9b5c4142e38c7c8b4c48178894a0a3c5"}, + {file = "chroma_hnswlib-0.7.6-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:2fe6ea949047beed19a94b33f41fe882a691e58b70c55fdaa90274ae78be046f"}, + {file = "chroma_hnswlib-0.7.6-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:feceff971e2a2728c9ddd862a9dd6eb9f638377ad98438876c9aeac96c9482f5"}, + {file = "chroma_hnswlib-0.7.6-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb0633b60e00a2b92314d0bf5bbc0da3d3320be72c7e3f4a9b19f4609dc2b2ab"}, + {file = "chroma_hnswlib-0.7.6-cp37-cp37m-win_amd64.whl", hash = "sha256:a566abe32fab42291f766d667bdbfa234a7f457dcbd2ba19948b7a978c8ca624"}, + {file = "chroma_hnswlib-0.7.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6be47853d9a58dedcfa90fc846af202b071f028bbafe1d8711bf64fe5a7f6111"}, + {file = "chroma_hnswlib-0.7.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3a7af35bdd39a88bffa49f9bb4bf4f9040b684514a024435a1ef5cdff980579d"}, + {file = "chroma_hnswlib-0.7.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a53b1f1551f2b5ad94eb610207bde1bb476245fc5097a2bec2b476c653c58bde"}, + {file = "chroma_hnswlib-0.7.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3085402958dbdc9ff5626ae58d696948e715aef88c86d1e3f9285a88f1afd3bc"}, + {file = "chroma_hnswlib-0.7.6-cp38-cp38-win_amd64.whl", hash = "sha256:77326f658a15adfb806a16543f7db7c45f06fd787d699e643642d6bde8ed49c4"}, + {file = "chroma_hnswlib-0.7.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:93b056ab4e25adab861dfef21e1d2a2756b18be5bc9c292aa252fa12bb44e6ae"}, + {file = "chroma_hnswlib-0.7.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fe91f018b30452c16c811fd6c8ede01f84e5a9f3c23e0758775e57f1c3778871"}, + {file = "chroma_hnswlib-0.7.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e6c0e627476f0f4d9e153420d36042dd9c6c3671cfd1fe511c0253e38c2a1039"}, + {file = "chroma_hnswlib-0.7.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3e9796a4536b7de6c6d76a792ba03e08f5aaa53e97e052709568e50b4d20c04f"}, + {file = "chroma_hnswlib-0.7.6-cp39-cp39-win_amd64.whl", hash = "sha256:d30e2db08e7ffdcc415bd072883a322de5995eb6ec28a8f8c054103bbd3ec1e0"}, + {file = "chroma_hnswlib-0.7.6.tar.gz", hash = "sha256:4dce282543039681160259d29fcde6151cc9106c6461e0485f57cdccd83059b7"}, +] + +[package.dependencies] +numpy = "*" + +[[package]] +name = "chromadb" +version = "0.5.23" +description = "Chroma." 
+optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "chromadb-0.5.23-py3-none-any.whl", hash = "sha256:ffe5bdd7276d12cb682df0d38a13aa37573e6a3678e71889ac45f539ae05ad7e"}, + {file = "chromadb-0.5.23.tar.gz", hash = "sha256:360a12b9795c5a33cb1f839d14410ccbde662ef1accd36153b0ae22312edabd1"}, +] + +[package.dependencies] +bcrypt = ">=4.0.1" +build = ">=1.0.3" +chroma-hnswlib = "0.7.6" +fastapi = ">=0.95.2" +graphlib_backport = {version = ">=1.0.3", markers = "python_version < \"3.9\""} +grpcio = ">=1.58.0" +httpx = ">=0.27.0" +importlib-resources = "*" +kubernetes = ">=28.1.0" +mmh3 = ">=4.0.1" +numpy = ">=1.22.5" +onnxruntime = ">=1.14.1" +opentelemetry-api = ">=1.2.0" +opentelemetry-exporter-otlp-proto-grpc = ">=1.2.0" +opentelemetry-instrumentation-fastapi = ">=0.41b0" +opentelemetry-sdk = ">=1.2.0" +orjson = ">=3.9.12" +overrides = ">=7.3.1" +posthog = ">=2.4.0" +pydantic = ">=1.9" +pypika = ">=0.48.9" +PyYAML = ">=6.0.0" +rich = ">=10.11.0" +tenacity = ">=8.2.3" +tokenizers = ">=0.13.2,<=0.20.3" +tqdm = ">=4.65.0" +typer = ">=0.9.0" +typing_extensions = ">=4.5.0" +uvicorn = {version = ">=0.18.3", extras = ["standard"]} + +[[package]] +name = "click" +version = "8.1.8" +description = "Composable command line interface toolkit" +optional = false +python-versions = ">=3.7" +groups = ["main"] +markers = "python_version < \"3.10\"" +files = [ + {file = "click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2"}, + {file = "click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[[package]] +name = "click" +version = "8.2.1" +description = "Composable command line interface toolkit" +optional = false +python-versions = ">=3.10" +groups = ["main"] +markers = "python_version >= \"3.10\"" +files = [ + {file = "click-8.2.1-py3-none-any.whl", hash = "sha256:61a3265b914e850b85317d0b3109c7f8cd35a670f963866005d6ef1d5175a12b"}, + {file = "click-8.2.1.tar.gz", hash = "sha256:27c491cc05d968d271d5a1db13e3b5a184636d9d930f148c50b038f0d0646202"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[[package]] +name = "colorama" +version = "0.4.6" +description = "Cross-platform colored terminal text." 
+optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +groups = ["main", "dev"] +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] +markers = {main = "platform_system == \"Windows\" or os_name == \"nt\" or sys_platform == \"win32\"", dev = "sys_platform == \"win32\""} + +[[package]] +name = "coloredlogs" +version = "15.0.1" +description = "Colored terminal output for Python's logging module" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +groups = ["main"] +files = [ + {file = "coloredlogs-15.0.1-py2.py3-none-any.whl", hash = "sha256:612ee75c546f53e92e70049c9dbfcc18c935a2b9a53b66085ce9ef6a6e5c0934"}, + {file = "coloredlogs-15.0.1.tar.gz", hash = "sha256:7c991aa71a4577af2f82600d8f8f3a89f936baeaf9b50a9c197da014e5bf16b0"}, +] + +[package.dependencies] +humanfriendly = ">=9.1" + +[package.extras] +cron = ["capturer (>=2.4)"] + +[[package]] +name = "coverage" +version = "7.6.1" +description = "Code coverage measurement for Python" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +markers = "python_version < \"3.10\"" +files = [ + {file = "coverage-7.6.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b06079abebbc0e89e6163b8e8f0e16270124c154dc6e4a47b413dd538859af16"}, + {file = "coverage-7.6.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cf4b19715bccd7ee27b6b120e7e9dd56037b9c0681dcc1adc9ba9db3d417fa36"}, + {file = "coverage-7.6.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e61c0abb4c85b095a784ef23fdd4aede7a2628478e7baba7c5e3deba61070a02"}, + {file = "coverage-7.6.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fd21f6ae3f08b41004dfb433fa895d858f3f5979e7762d052b12aef444e29afc"}, + {file = "coverage-7.6.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f59d57baca39b32db42b83b2a7ba6f47ad9c394ec2076b084c3f029b7afca23"}, + {file = "coverage-7.6.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a1ac0ae2b8bd743b88ed0502544847c3053d7171a3cff9228af618a068ed9c34"}, + {file = "coverage-7.6.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e6a08c0be454c3b3beb105c0596ebdc2371fab6bb90c0c0297f4e58fd7e1012c"}, + {file = "coverage-7.6.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f5796e664fe802da4f57a168c85359a8fbf3eab5e55cd4e4569fbacecc903959"}, + {file = "coverage-7.6.1-cp310-cp310-win32.whl", hash = "sha256:7bb65125fcbef8d989fa1dd0e8a060999497629ca5b0efbca209588a73356232"}, + {file = "coverage-7.6.1-cp310-cp310-win_amd64.whl", hash = "sha256:3115a95daa9bdba70aea750db7b96b37259a81a709223c8448fa97727d546fe0"}, + {file = "coverage-7.6.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7dea0889685db8550f839fa202744652e87c60015029ce3f60e006f8c4462c93"}, + {file = "coverage-7.6.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ed37bd3c3b063412f7620464a9ac1314d33100329f39799255fb8d3027da50d3"}, + {file = "coverage-7.6.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d85f5e9a5f8b73e2350097c3756ef7e785f55bd71205defa0bfdaf96c31616ff"}, + {file = "coverage-7.6.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:9bc572be474cafb617672c43fe989d6e48d3c83af02ce8de73fff1c6bb3c198d"}, + {file = "coverage-7.6.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c0420b573964c760df9e9e86d1a9a622d0d27f417e1a949a8a66dd7bcee7bc6"}, + {file = "coverage-7.6.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1f4aa8219db826ce6be7099d559f8ec311549bfc4046f7f9fe9b5cea5c581c56"}, + {file = "coverage-7.6.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:fc5a77d0c516700ebad189b587de289a20a78324bc54baee03dd486f0855d234"}, + {file = "coverage-7.6.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b48f312cca9621272ae49008c7f613337c53fadca647d6384cc129d2996d1133"}, + {file = "coverage-7.6.1-cp311-cp311-win32.whl", hash = "sha256:1125ca0e5fd475cbbba3bb67ae20bd2c23a98fac4e32412883f9bcbaa81c314c"}, + {file = "coverage-7.6.1-cp311-cp311-win_amd64.whl", hash = "sha256:8ae539519c4c040c5ffd0632784e21b2f03fc1340752af711f33e5be83a9d6c6"}, + {file = "coverage-7.6.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:95cae0efeb032af8458fc27d191f85d1717b1d4e49f7cb226cf526ff28179778"}, + {file = "coverage-7.6.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5621a9175cf9d0b0c84c2ef2b12e9f5f5071357c4d2ea6ca1cf01814f45d2391"}, + {file = "coverage-7.6.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:260933720fdcd75340e7dbe9060655aff3af1f0c5d20f46b57f262ab6c86a5e8"}, + {file = "coverage-7.6.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07e2ca0ad381b91350c0ed49d52699b625aab2b44b65e1b4e02fa9df0e92ad2d"}, + {file = "coverage-7.6.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c44fee9975f04b33331cb8eb272827111efc8930cfd582e0320613263ca849ca"}, + {file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:877abb17e6339d96bf08e7a622d05095e72b71f8afd8a9fefc82cf30ed944163"}, + {file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:3e0cadcf6733c09154b461f1ca72d5416635e5e4ec4e536192180d34ec160f8a"}, + {file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c3c02d12f837d9683e5ab2f3d9844dc57655b92c74e286c262e0fc54213c216d"}, + {file = "coverage-7.6.1-cp312-cp312-win32.whl", hash = "sha256:e05882b70b87a18d937ca6768ff33cc3f72847cbc4de4491c8e73880766718e5"}, + {file = "coverage-7.6.1-cp312-cp312-win_amd64.whl", hash = "sha256:b5d7b556859dd85f3a541db6a4e0167b86e7273e1cdc973e5b175166bb634fdb"}, + {file = "coverage-7.6.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a4acd025ecc06185ba2b801f2de85546e0b8ac787cf9d3b06e7e2a69f925b106"}, + {file = "coverage-7.6.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a6d3adcf24b624a7b778533480e32434a39ad8fa30c315208f6d3e5542aeb6e9"}, + {file = "coverage-7.6.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0c212c49b6c10e6951362f7c6df3329f04c2b1c28499563d4035d964ab8e08c"}, + {file = "coverage-7.6.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e81d7a3e58882450ec4186ca59a3f20a5d4440f25b1cff6f0902ad890e6748a"}, + {file = "coverage-7.6.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78b260de9790fd81e69401c2dc8b17da47c8038176a79092a89cb2b7d945d060"}, + {file = "coverage-7.6.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:a78d169acd38300060b28d600344a803628c3fd585c912cacc9ea8790fe96862"}, + {file = "coverage-7.6.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2c09f4ce52cb99dd7505cd0fc8e0e37c77b87f46bc9c1eb03fe3bc9991085388"}, + {file = "coverage-7.6.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6878ef48d4227aace338d88c48738a4258213cd7b74fd9a3d4d7582bb1d8a155"}, + {file = "coverage-7.6.1-cp313-cp313-win32.whl", hash = "sha256:44df346d5215a8c0e360307d46ffaabe0f5d3502c8a1cefd700b34baf31d411a"}, + {file = "coverage-7.6.1-cp313-cp313-win_amd64.whl", hash = "sha256:8284cf8c0dd272a247bc154eb6c95548722dce90d098c17a883ed36e67cdb129"}, + {file = "coverage-7.6.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:d3296782ca4eab572a1a4eca686d8bfb00226300dcefdf43faa25b5242ab8a3e"}, + {file = "coverage-7.6.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:502753043567491d3ff6d08629270127e0c31d4184c4c8d98f92c26f65019962"}, + {file = "coverage-7.6.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a89ecca80709d4076b95f89f308544ec8f7b4727e8a547913a35f16717856cb"}, + {file = "coverage-7.6.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a318d68e92e80af8b00fa99609796fdbcdfef3629c77c6283566c6f02c6d6704"}, + {file = "coverage-7.6.1-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13b0a73a0896988f053e4fbb7de6d93388e6dd292b0d87ee51d106f2c11b465b"}, + {file = "coverage-7.6.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:4421712dbfc5562150f7554f13dde997a2e932a6b5f352edcce948a815efee6f"}, + {file = "coverage-7.6.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:166811d20dfea725e2e4baa71fffd6c968a958577848d2131f39b60043400223"}, + {file = "coverage-7.6.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:225667980479a17db1048cb2bf8bfb39b8e5be8f164b8f6628b64f78a72cf9d3"}, + {file = "coverage-7.6.1-cp313-cp313t-win32.whl", hash = "sha256:170d444ab405852903b7d04ea9ae9b98f98ab6d7e63e1115e82620807519797f"}, + {file = "coverage-7.6.1-cp313-cp313t-win_amd64.whl", hash = "sha256:b9f222de8cded79c49bf184bdbc06630d4c58eec9459b939b4a690c82ed05657"}, + {file = "coverage-7.6.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6db04803b6c7291985a761004e9060b2bca08da6d04f26a7f2294b8623a0c1a0"}, + {file = "coverage-7.6.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f1adfc8ac319e1a348af294106bc6a8458a0f1633cc62a1446aebc30c5fa186a"}, + {file = "coverage-7.6.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a95324a9de9650a729239daea117df21f4b9868ce32e63f8b650ebe6cef5595b"}, + {file = "coverage-7.6.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b43c03669dc4618ec25270b06ecd3ee4fa94c7f9b3c14bae6571ca00ef98b0d3"}, + {file = "coverage-7.6.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8929543a7192c13d177b770008bc4e8119f2e1f881d563fc6b6305d2d0ebe9de"}, + {file = "coverage-7.6.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:a09ece4a69cf399510c8ab25e0950d9cf2b42f7b3cb0374f95d2e2ff594478a6"}, + {file = "coverage-7.6.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:9054a0754de38d9dbd01a46621636689124d666bad1936d76c0341f7d71bf569"}, + {file = "coverage-7.6.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0dbde0f4aa9a16fa4d754356a8f2e36296ff4d83994b2c9d8398aa32f222f989"}, + {file = 
"coverage-7.6.1-cp38-cp38-win32.whl", hash = "sha256:da511e6ad4f7323ee5702e6633085fb76c2f893aaf8ce4c51a0ba4fc07580ea7"}, + {file = "coverage-7.6.1-cp38-cp38-win_amd64.whl", hash = "sha256:3f1156e3e8f2872197af3840d8ad307a9dd18e615dc64d9ee41696f287c57ad8"}, + {file = "coverage-7.6.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:abd5fd0db5f4dc9289408aaf34908072f805ff7792632250dcb36dc591d24255"}, + {file = "coverage-7.6.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:547f45fa1a93154bd82050a7f3cddbc1a7a4dd2a9bf5cb7d06f4ae29fe94eaf8"}, + {file = "coverage-7.6.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:645786266c8f18a931b65bfcefdbf6952dd0dea98feee39bd188607a9d307ed2"}, + {file = "coverage-7.6.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9e0b2df163b8ed01d515807af24f63de04bebcecbd6c3bfeff88385789fdf75a"}, + {file = "coverage-7.6.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:609b06f178fe8e9f89ef676532760ec0b4deea15e9969bf754b37f7c40326dbc"}, + {file = "coverage-7.6.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:702855feff378050ae4f741045e19a32d57d19f3e0676d589df0575008ea5004"}, + {file = "coverage-7.6.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:2bdb062ea438f22d99cba0d7829c2ef0af1d768d1e4a4f528087224c90b132cb"}, + {file = "coverage-7.6.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:9c56863d44bd1c4fe2abb8a4d6f5371d197f1ac0ebdee542f07f35895fc07f36"}, + {file = "coverage-7.6.1-cp39-cp39-win32.whl", hash = "sha256:6e2cd258d7d927d09493c8df1ce9174ad01b381d4729a9d8d4e38670ca24774c"}, + {file = "coverage-7.6.1-cp39-cp39-win_amd64.whl", hash = "sha256:06a737c882bd26d0d6ee7269b20b12f14a8704807a01056c80bb881a4b2ce6ca"}, + {file = "coverage-7.6.1-pp38.pp39.pp310-none-any.whl", hash = "sha256:e9a6e0eb86070e8ccaedfbd9d38fec54864f3125ab95419970575b42af7541df"}, + {file = "coverage-7.6.1.tar.gz", hash = "sha256:953510dfb7b12ab69d20135a0662397f077c59b1e6379a768e97c59d852ee51d"}, +] + +[package.dependencies] +tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.11.0a6\" and extra == \"toml\""} + +[package.extras] +toml = ["tomli ; python_full_version <= \"3.11.0a6\""] + +[[package]] +name = "coverage" +version = "7.9.1" +description = "Code coverage measurement for Python" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +markers = "python_version >= \"3.10\"" +files = [ + {file = "coverage-7.9.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:cc94d7c5e8423920787c33d811c0be67b7be83c705f001f7180c7b186dcf10ca"}, + {file = "coverage-7.9.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:16aa0830d0c08a2c40c264cef801db8bc4fc0e1892782e45bcacbd5889270509"}, + {file = "coverage-7.9.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf95981b126f23db63e9dbe4cf65bd71f9a6305696fa5e2262693bc4e2183f5b"}, + {file = "coverage-7.9.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f05031cf21699785cd47cb7485f67df619e7bcdae38e0fde40d23d3d0210d3c3"}, + {file = "coverage-7.9.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb4fbcab8764dc072cb651a4bcda4d11fb5658a1d8d68842a862a6610bd8cfa3"}, + {file = "coverage-7.9.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:0f16649a7330ec307942ed27d06ee7e7a38417144620bb3d6e9a18ded8a2d3e5"}, + {file = 
"coverage-7.9.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:cea0a27a89e6432705fffc178064503508e3c0184b4f061700e771a09de58187"}, + {file = "coverage-7.9.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e980b53a959fa53b6f05343afbd1e6f44a23ed6c23c4b4c56c6662bbb40c82ce"}, + {file = "coverage-7.9.1-cp310-cp310-win32.whl", hash = "sha256:70760b4c5560be6ca70d11f8988ee6542b003f982b32f83d5ac0b72476607b70"}, + {file = "coverage-7.9.1-cp310-cp310-win_amd64.whl", hash = "sha256:a66e8f628b71f78c0e0342003d53b53101ba4e00ea8dabb799d9dba0abbbcebe"}, + {file = "coverage-7.9.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:95c765060e65c692da2d2f51a9499c5e9f5cf5453aeaf1420e3fc847cc060582"}, + {file = "coverage-7.9.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ba383dc6afd5ec5b7a0d0c23d38895db0e15bcba7fb0fa8901f245267ac30d86"}, + {file = "coverage-7.9.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37ae0383f13cbdcf1e5e7014489b0d71cc0106458878ccde52e8a12ced4298ed"}, + {file = "coverage-7.9.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:69aa417a030bf11ec46149636314c24c8d60fadb12fc0ee8f10fda0d918c879d"}, + {file = "coverage-7.9.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0a4be2a28656afe279b34d4f91c3e26eccf2f85500d4a4ff0b1f8b54bf807338"}, + {file = "coverage-7.9.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:382e7ddd5289f140259b610e5f5c58f713d025cb2f66d0eb17e68d0a94278875"}, + {file = "coverage-7.9.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e5532482344186c543c37bfad0ee6069e8ae4fc38d073b8bc836fc8f03c9e250"}, + {file = "coverage-7.9.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a39d18b3f50cc121d0ce3838d32d58bd1d15dab89c910358ebefc3665712256c"}, + {file = "coverage-7.9.1-cp311-cp311-win32.whl", hash = "sha256:dd24bd8d77c98557880def750782df77ab2b6885a18483dc8588792247174b32"}, + {file = "coverage-7.9.1-cp311-cp311-win_amd64.whl", hash = "sha256:6b55ad10a35a21b8015eabddc9ba31eb590f54adc9cd39bcf09ff5349fd52125"}, + {file = "coverage-7.9.1-cp311-cp311-win_arm64.whl", hash = "sha256:6ad935f0016be24c0e97fc8c40c465f9c4b85cbbe6eac48934c0dc4d2568321e"}, + {file = "coverage-7.9.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:a8de12b4b87c20de895f10567639c0797b621b22897b0af3ce4b4e204a743626"}, + {file = "coverage-7.9.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5add197315a054e92cee1b5f686a2bcba60c4c3e66ee3de77ace6c867bdee7cb"}, + {file = "coverage-7.9.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:600a1d4106fe66f41e5d0136dfbc68fe7200a5cbe85610ddf094f8f22e1b0300"}, + {file = "coverage-7.9.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2a876e4c3e5a2a1715a6608906aa5a2e0475b9c0f68343c2ada98110512ab1d8"}, + {file = "coverage-7.9.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81f34346dd63010453922c8e628a52ea2d2ccd73cb2487f7700ac531b247c8a5"}, + {file = "coverage-7.9.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:888f8eee13f2377ce86d44f338968eedec3291876b0b8a7289247ba52cb984cd"}, + {file = "coverage-7.9.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:9969ef1e69b8c8e1e70d591f91bbc37fc9a3621e447525d1602801a24ceda898"}, + {file = "coverage-7.9.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:60c458224331ee3f1a5b472773e4a085cc27a86a0b48205409d364272d67140d"}, + {file = "coverage-7.9.1-cp312-cp312-win32.whl", hash = "sha256:5f646a99a8c2b3ff4c6a6e081f78fad0dde275cd59f8f49dc4eab2e394332e74"}, + {file = "coverage-7.9.1-cp312-cp312-win_amd64.whl", hash = "sha256:30f445f85c353090b83e552dcbbdad3ec84c7967e108c3ae54556ca69955563e"}, + {file = "coverage-7.9.1-cp312-cp312-win_arm64.whl", hash = "sha256:af41da5dca398d3474129c58cb2b106a5d93bbb196be0d307ac82311ca234342"}, + {file = "coverage-7.9.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:31324f18d5969feef7344a932c32428a2d1a3e50b15a6404e97cba1cc9b2c631"}, + {file = "coverage-7.9.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0c804506d624e8a20fb3108764c52e0eef664e29d21692afa375e0dd98dc384f"}, + {file = "coverage-7.9.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ef64c27bc40189f36fcc50c3fb8f16ccda73b6a0b80d9bd6e6ce4cffcd810bbd"}, + {file = "coverage-7.9.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d4fe2348cc6ec372e25adec0219ee2334a68d2f5222e0cba9c0d613394e12d86"}, + {file = "coverage-7.9.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:34ed2186fe52fcc24d4561041979a0dec69adae7bce2ae8d1c49eace13e55c43"}, + {file = "coverage-7.9.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:25308bd3d00d5eedd5ae7d4357161f4df743e3c0240fa773ee1b0f75e6c7c0f1"}, + {file = "coverage-7.9.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:73e9439310f65d55a5a1e0564b48e34f5369bee943d72c88378f2d576f5a5751"}, + {file = "coverage-7.9.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:37ab6be0859141b53aa89412a82454b482c81cf750de4f29223d52268a86de67"}, + {file = "coverage-7.9.1-cp313-cp313-win32.whl", hash = "sha256:64bdd969456e2d02a8b08aa047a92d269c7ac1f47e0c977675d550c9a0863643"}, + {file = "coverage-7.9.1-cp313-cp313-win_amd64.whl", hash = "sha256:be9e3f68ca9edb897c2184ad0eee815c635565dbe7a0e7e814dc1f7cbab92c0a"}, + {file = "coverage-7.9.1-cp313-cp313-win_arm64.whl", hash = "sha256:1c503289ffef1d5105d91bbb4d62cbe4b14bec4d13ca225f9c73cde9bb46207d"}, + {file = "coverage-7.9.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0b3496922cb5f4215bf5caaef4cf12364a26b0be82e9ed6d050f3352cf2d7ef0"}, + {file = "coverage-7.9.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:9565c3ab1c93310569ec0d86b017f128f027cab0b622b7af288696d7ed43a16d"}, + {file = "coverage-7.9.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2241ad5dbf79ae1d9c08fe52b36d03ca122fb9ac6bca0f34439e99f8327ac89f"}, + {file = "coverage-7.9.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3bb5838701ca68b10ebc0937dbd0eb81974bac54447c55cd58dea5bca8451029"}, + {file = "coverage-7.9.1-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b30a25f814591a8c0c5372c11ac8967f669b97444c47fd794926e175c4047ece"}, + {file = "coverage-7.9.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:2d04b16a6062516df97969f1ae7efd0de9c31eb6ebdceaa0d213b21c0ca1a683"}, + {file = "coverage-7.9.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:7931b9e249edefb07cd6ae10c702788546341d5fe44db5b6108a25da4dca513f"}, + {file = "coverage-7.9.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:52e92b01041151bf607ee858e5a56c62d4b70f4dac85b8c8cb7fb8a351ab2c10"}, + {file = 
"coverage-7.9.1-cp313-cp313t-win32.whl", hash = "sha256:684e2110ed84fd1ca5f40e89aa44adf1729dc85444004111aa01866507adf363"}, + {file = "coverage-7.9.1-cp313-cp313t-win_amd64.whl", hash = "sha256:437c576979e4db840539674e68c84b3cda82bc824dd138d56bead1435f1cb5d7"}, + {file = "coverage-7.9.1-cp313-cp313t-win_arm64.whl", hash = "sha256:18a0912944d70aaf5f399e350445738a1a20b50fbea788f640751c2ed9208b6c"}, + {file = "coverage-7.9.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6f424507f57878e424d9a95dc4ead3fbdd72fd201e404e861e465f28ea469951"}, + {file = "coverage-7.9.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:535fde4001b2783ac80865d90e7cc7798b6b126f4cd8a8c54acfe76804e54e58"}, + {file = "coverage-7.9.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:02532fd3290bb8fa6bec876520842428e2a6ed6c27014eca81b031c2d30e3f71"}, + {file = "coverage-7.9.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:56f5eb308b17bca3bbff810f55ee26d51926d9f89ba92707ee41d3c061257e55"}, + {file = "coverage-7.9.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bfa447506c1a52271f1b0de3f42ea0fa14676052549095e378d5bff1c505ff7b"}, + {file = "coverage-7.9.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:9ca8e220006966b4a7b68e8984a6aee645a0384b0769e829ba60281fe61ec4f7"}, + {file = "coverage-7.9.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:49f1d0788ba5b7ba65933f3a18864117c6506619f5ca80326b478f72acf3f385"}, + {file = "coverage-7.9.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:68cd53aec6f45b8e4724c0950ce86eacb775c6be01ce6e3669fe4f3a21e768ed"}, + {file = "coverage-7.9.1-cp39-cp39-win32.whl", hash = "sha256:95335095b6c7b1cc14c3f3f17d5452ce677e8490d101698562b2ffcacc304c8d"}, + {file = "coverage-7.9.1-cp39-cp39-win_amd64.whl", hash = "sha256:e1b5191d1648acc439b24721caab2fd0c86679d8549ed2c84d5a7ec1bedcc244"}, + {file = "coverage-7.9.1-pp39.pp310.pp311-none-any.whl", hash = "sha256:db0f04118d1db74db6c9e1cb1898532c7dcc220f1d2718f058601f7c3f499514"}, + {file = "coverage-7.9.1-py3-none-any.whl", hash = "sha256:66b974b145aa189516b6bf2d8423e888b742517d37872f6ee4c5be0073bd9a3c"}, + {file = "coverage-7.9.1.tar.gz", hash = "sha256:6cf43c78c4282708a28e466316935ec7489a9c487518a77fa68f716c67909cec"}, +] + +[package.dependencies] +tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.11.0a6\" and extra == \"toml\""} + +[package.extras] +toml = ["tomli ; python_full_version <= \"3.11.0a6\""] + +[[package]] +name = "crcmod" +version = "1.7" +description = "CRC Generator" +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "crcmod-1.7.tar.gz", hash = "sha256:dc7051a0db5f2bd48665a990d3ec1cc305a466a77358ca4492826f41f283601e"}, +] + +[[package]] +name = "cryptography" +version = "43.0.3" +description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." 
+optional = false +python-versions = ">=3.7" +groups = ["main"] +markers = "python_version < \"3.10\"" +files = [ + {file = "cryptography-43.0.3-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:bf7a1932ac4176486eab36a19ed4c0492da5d97123f1406cf15e41b05e787d2e"}, + {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63efa177ff54aec6e1c0aefaa1a241232dcd37413835a9b674b6e3f0ae2bfd3e"}, + {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e1ce50266f4f70bf41a2c6dc4358afadae90e2a1e5342d3c08883df1675374f"}, + {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:443c4a81bb10daed9a8f334365fe52542771f25aedaf889fd323a853ce7377d6"}, + {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:74f57f24754fe349223792466a709f8e0c093205ff0dca557af51072ff47ab18"}, + {file = "cryptography-43.0.3-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9762ea51a8fc2a88b70cf2995e5675b38d93bf36bd67d91721c309df184f49bd"}, + {file = "cryptography-43.0.3-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:81ef806b1fef6b06dcebad789f988d3b37ccaee225695cf3e07648eee0fc6b73"}, + {file = "cryptography-43.0.3-cp37-abi3-win32.whl", hash = "sha256:cbeb489927bd7af4aa98d4b261af9a5bc025bd87f0e3547e11584be9e9427be2"}, + {file = "cryptography-43.0.3-cp37-abi3-win_amd64.whl", hash = "sha256:f46304d6f0c6ab8e52770addfa2fc41e6629495548862279641972b6215451cd"}, + {file = "cryptography-43.0.3-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:8ac43ae87929a5982f5948ceda07001ee5e83227fd69cf55b109144938d96984"}, + {file = "cryptography-43.0.3-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:846da004a5804145a5f441b8530b4bf35afbf7da70f82409f151695b127213d5"}, + {file = "cryptography-43.0.3-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f996e7268af62598f2fc1204afa98a3b5712313a55c4c9d434aef49cadc91d4"}, + {file = "cryptography-43.0.3-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:f7b178f11ed3664fd0e995a47ed2b5ff0a12d893e41dd0494f406d1cf555cab7"}, + {file = "cryptography-43.0.3-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:c2e6fc39c4ab499049df3bdf567f768a723a5e8464816e8f009f121a5a9f4405"}, + {file = "cryptography-43.0.3-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:e1be4655c7ef6e1bbe6b5d0403526601323420bcf414598955968c9ef3eb7d16"}, + {file = "cryptography-43.0.3-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:df6b6c6d742395dd77a23ea3728ab62f98379eff8fb61be2744d4679ab678f73"}, + {file = "cryptography-43.0.3-cp39-abi3-win32.whl", hash = "sha256:d56e96520b1020449bbace2b78b603442e7e378a9b3bd68de65c782db1507995"}, + {file = "cryptography-43.0.3-cp39-abi3-win_amd64.whl", hash = "sha256:0c580952eef9bf68c4747774cde7ec1d85a6e61de97281f2dba83c7d2c806362"}, + {file = "cryptography-43.0.3-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d03b5621a135bffecad2c73e9f4deb1a0f977b9a8ffe6f8e002bf6c9d07b918c"}, + {file = "cryptography-43.0.3-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:a2a431ee15799d6db9fe80c82b055bae5a752bef645bba795e8e52687c69efe3"}, + {file = "cryptography-43.0.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:281c945d0e28c92ca5e5930664c1cefd85efe80e5c0d2bc58dd63383fda29f83"}, + {file = "cryptography-43.0.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:f18c716be16bc1fea8e95def49edf46b82fccaa88587a45f8dc0ff6ab5d8e0a7"}, + {file = 
"cryptography-43.0.3-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:4a02ded6cd4f0a5562a8887df8b3bd14e822a90f97ac5e544c162899bc467664"}, + {file = "cryptography-43.0.3-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:53a583b6637ab4c4e3591a15bc9db855b8d9dee9a669b550f311480acab6eb08"}, + {file = "cryptography-43.0.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:1ec0bcf7e17c0c5669d881b1cd38c4972fade441b27bda1051665faaa89bdcaa"}, + {file = "cryptography-43.0.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2ce6fae5bdad59577b44e4dfed356944fbf1d925269114c28be377692643b4ff"}, + {file = "cryptography-43.0.3.tar.gz", hash = "sha256:315b9001266a492a6ff443b61238f956b214dbec9910a081ba5b6646a055a805"}, +] + +[package.dependencies] +cffi = {version = ">=1.12", markers = "platform_python_implementation != \"PyPy\""} + +[package.extras] +docs = ["sphinx (>=5.3.0)", "sphinx-rtd-theme (>=1.1.1)"] +docstest = ["pyenchant (>=1.6.11)", "readme-renderer", "sphinxcontrib-spelling (>=4.0.1)"] +nox = ["nox"] +pep8test = ["check-sdist", "click", "mypy", "ruff"] +sdist = ["build"] +ssh = ["bcrypt (>=3.1.5)"] +test = ["certifi", "cryptography-vectors (==43.0.3)", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] +test-randomorder = ["pytest-randomly"] + +[[package]] +name = "cryptography" +version = "45.0.4" +description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." +optional = false +python-versions = "!=3.9.0,!=3.9.1,>=3.7" +groups = ["main"] +markers = "python_version >= \"3.10\"" +files = [ + {file = "cryptography-45.0.4-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:425a9a6ac2823ee6e46a76a21a4e8342d8fa5c01e08b823c1f19a8b74f096069"}, + {file = "cryptography-45.0.4-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:680806cf63baa0039b920f4976f5f31b10e772de42f16310a6839d9f21a26b0d"}, + {file = "cryptography-45.0.4-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4ca0f52170e821bc8da6fc0cc565b7bb8ff8d90d36b5e9fdd68e8a86bdf72036"}, + {file = "cryptography-45.0.4-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:f3fe7a5ae34d5a414957cc7f457e2b92076e72938423ac64d215722f6cf49a9e"}, + {file = "cryptography-45.0.4-cp311-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:25eb4d4d3e54595dc8adebc6bbd5623588991d86591a78c2548ffb64797341e2"}, + {file = "cryptography-45.0.4-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:ce1678a2ccbe696cf3af15a75bb72ee008d7ff183c9228592ede9db467e64f1b"}, + {file = "cryptography-45.0.4-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:49fe9155ab32721b9122975e168a6760d8ce4cffe423bcd7ca269ba41b5dfac1"}, + {file = "cryptography-45.0.4-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:2882338b2a6e0bd337052e8b9007ced85c637da19ef9ecaf437744495c8c2999"}, + {file = "cryptography-45.0.4-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:23b9c3ea30c3ed4db59e7b9619272e94891f8a3a5591d0b656a7582631ccf750"}, + {file = "cryptography-45.0.4-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:b0a97c927497e3bc36b33987abb99bf17a9a175a19af38a892dc4bbb844d7ee2"}, + {file = "cryptography-45.0.4-cp311-abi3-win32.whl", hash = "sha256:e00a6c10a5c53979d6242f123c0a97cff9f3abed7f064fc412c36dc521b5f257"}, + {file = "cryptography-45.0.4-cp311-abi3-win_amd64.whl", hash = "sha256:817ee05c6c9f7a69a16200f0c90ab26d23a87701e2a284bd15156783e46dbcc8"}, + {file = 
"cryptography-45.0.4-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:964bcc28d867e0f5491a564b7debb3ffdd8717928d315d12e0d7defa9e43b723"}, + {file = "cryptography-45.0.4-cp37-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:6a5bf57554e80f75a7db3d4b1dacaa2764611ae166ab42ea9a72bcdb5d577637"}, + {file = "cryptography-45.0.4-cp37-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:46cf7088bf91bdc9b26f9c55636492c1cce3e7aaf8041bbf0243f5e5325cfb2d"}, + {file = "cryptography-45.0.4-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:7bedbe4cc930fa4b100fc845ea1ea5788fcd7ae9562e669989c11618ae8d76ee"}, + {file = "cryptography-45.0.4-cp37-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:eaa3e28ea2235b33220b949c5a0d6cf79baa80eab2eb5607ca8ab7525331b9ff"}, + {file = "cryptography-45.0.4-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:7ef2dde4fa9408475038fc9aadfc1fb2676b174e68356359632e980c661ec8f6"}, + {file = "cryptography-45.0.4-cp37-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:6a3511ae33f09094185d111160fd192c67aa0a2a8d19b54d36e4c78f651dc5ad"}, + {file = "cryptography-45.0.4-cp37-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:06509dc70dd71fa56eaa138336244e2fbaf2ac164fc9b5e66828fccfd2b680d6"}, + {file = "cryptography-45.0.4-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:5f31e6b0a5a253f6aa49be67279be4a7e5a4ef259a9f33c69f7d1b1191939872"}, + {file = "cryptography-45.0.4-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:944e9ccf67a9594137f942d5b52c8d238b1b4e46c7a0c2891b7ae6e01e7c80a4"}, + {file = "cryptography-45.0.4-cp37-abi3-win32.whl", hash = "sha256:c22fe01e53dc65edd1945a2e6f0015e887f84ced233acecb64b4daadb32f5c97"}, + {file = "cryptography-45.0.4-cp37-abi3-win_amd64.whl", hash = "sha256:627ba1bc94f6adf0b0a2e35d87020285ead22d9f648c7e75bb64f367375f3b22"}, + {file = "cryptography-45.0.4-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a77c6fb8d76e9c9f99f2f3437c1a4ac287b34eaf40997cfab1e9bd2be175ac39"}, + {file = "cryptography-45.0.4-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:7aad98a25ed8ac917fdd8a9c1e706e5a0956e06c498be1f713b61734333a4507"}, + {file = "cryptography-45.0.4-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:3530382a43a0e524bc931f187fc69ef4c42828cf7d7f592f7f249f602b5a4ab0"}, + {file = "cryptography-45.0.4-pp310-pypy310_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:6b613164cb8425e2f8db5849ffb84892e523bf6d26deb8f9bb76ae86181fa12b"}, + {file = "cryptography-45.0.4-pp310-pypy310_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:96d4819e25bf3b685199b304a0029ce4a3caf98947ce8a066c9137cc78ad2c58"}, + {file = "cryptography-45.0.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:b97737a3ffbea79eebb062eb0d67d72307195035332501722a9ca86bab9e3ab2"}, + {file = "cryptography-45.0.4-pp311-pypy311_pp73-macosx_10_9_x86_64.whl", hash = "sha256:4828190fb6c4bcb6ebc6331f01fe66ae838bb3bd58e753b59d4b22eb444b996c"}, + {file = "cryptography-45.0.4-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:03dbff8411206713185b8cebe31bc5c0eb544799a50c09035733716b386e61a4"}, + {file = "cryptography-45.0.4-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:51dfbd4d26172d31150d84c19bbe06c68ea4b7f11bbc7b3a5e146b367c311349"}, + {file = "cryptography-45.0.4-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:0339a692de47084969500ee455e42c58e449461e0ec845a34a6a9b9bf7df7fb8"}, + {file = "cryptography-45.0.4-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = 
"sha256:0cf13c77d710131d33e63626bd55ae7c0efb701ebdc2b3a7952b9b23a0412862"}, + {file = "cryptography-45.0.4-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:bbc505d1dc469ac12a0a064214879eac6294038d6b24ae9f71faae1448a9608d"}, + {file = "cryptography-45.0.4.tar.gz", hash = "sha256:7405ade85c83c37682c8fe65554759800a4a8c54b2d96e0f8ad114d31b808d57"}, +] + +[package.dependencies] +cffi = {version = ">=1.14", markers = "platform_python_implementation != \"PyPy\""} + +[package.extras] +docs = ["sphinx (>=5.3.0)", "sphinx-inline-tabs ; python_full_version >= \"3.8.0\"", "sphinx-rtd-theme (>=3.0.0) ; python_full_version >= \"3.8.0\""] +docstest = ["pyenchant (>=3)", "readme-renderer (>=30.0)", "sphinxcontrib-spelling (>=7.3.1)"] +nox = ["nox (>=2024.4.15)", "nox[uv] (>=2024.3.2) ; python_full_version >= \"3.8.0\""] +pep8test = ["check-sdist ; python_full_version >= \"3.8.0\"", "click (>=8.0.1)", "mypy (>=1.4)", "ruff (>=0.3.6)"] +sdist = ["build (>=1.0.0)"] +ssh = ["bcrypt (>=3.1.5)"] +test = ["certifi (>=2024)", "cryptography-vectors (==45.0.4)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"] +test-randomorder = ["pytest-randomly"] + +[[package]] +name = "datasets" +version = "2.18.0" +description = "HuggingFace community-driven open-source library of datasets" +optional = false +python-versions = ">=3.8.0" +groups = ["main"] +files = [ + {file = "datasets-2.18.0-py3-none-any.whl", hash = "sha256:f1bbf0e2896917a914de01cbd37075b14deea3837af87ad0d9f697388ccaeb50"}, + {file = "datasets-2.18.0.tar.gz", hash = "sha256:cdf8b8c6abf7316377ba4f49f9589a4c74556d6b481afd0abd2284f3d69185cb"}, +] + +[package.dependencies] +aiohttp = "*" +dill = ">=0.3.0,<0.3.9" +filelock = "*" +fsspec = {version = ">=2023.1.0,<=2024.2.0", extras = ["http"]} +huggingface-hub = ">=0.19.4" +multiprocess = "*" +numpy = ">=1.17" +packaging = "*" +pandas = "*" +pyarrow = ">=12.0.0" +pyarrow-hotfix = "*" +pyyaml = ">=5.1" +requests = ">=2.19.0" +tqdm = ">=4.62.1" +xxhash = "*" + +[package.extras] +apache-beam = ["apache-beam (>=2.26.0)"] +audio = ["librosa", "soundfile (>=0.12.1)"] +benchmarks = ["tensorflow (==2.12.0)", "torch (==2.0.1)", "transformers (==4.30.1)"] +dev = ["Pillow (>=6.2.1)", "absl-py", "apache-beam (>=2.26.0) ; sys_platform != \"win32\" and python_version < \"3.10\"", "elasticsearch (<8.0.0)", "faiss-cpu (>=1.6.4)", "jax (>=0.3.14) ; sys_platform != \"win32\"", "jaxlib (>=0.3.14) ; sys_platform != \"win32\"", "joblib (<1.3.0)", "joblibspark", "librosa", "lz4", "py7zr", "pyspark (>=3.4)", "pytest", "pytest-datadir", "pytest-xdist", "rarfile (>=4.0)", "ruff (>=0.3.0)", "s3fs", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "sqlalchemy", "tensorflow (>=2.2.0,!=2.6.0,!=2.6.1) ; sys_platform != \"darwin\" or platform_machine != \"arm64\"", "tensorflow (>=2.3,!=2.6.0,!=2.6.1) ; sys_platform != \"darwin\" or platform_machine != \"arm64\"", "tensorflow-macos ; sys_platform == \"darwin\" and platform_machine == \"arm64\"", "tiktoken", "torch", "torch (>=2.0.0)", "transformers", "typing-extensions (>=4.6.1)", "zstandard"] +docs = ["s3fs", "tensorflow (>=2.2.0,!=2.6.0,!=2.6.1) ; sys_platform != \"darwin\" or platform_machine != \"arm64\"", "tensorflow-macos ; sys_platform == \"darwin\" and platform_machine == \"arm64\"", "torch", "transformers"] +jax = ["jax (>=0.3.14)", "jaxlib (>=0.3.14)"] +metrics-tests = ["Werkzeug (>=1.0.1)", "accelerate", "bert-score (>=0.3.6)", "jiwer", "langdetect", "mauve-text", "nltk", "requests-file (>=1.5.1)", "rouge-score", 
"sacrebleu", "sacremoses", "scikit-learn", "scipy", "sentencepiece", "seqeval", "six (>=1.15.0,<1.16.0)", "spacy (>=3.0.0)", "texttable (>=1.6.3)", "tldextract", "tldextract (>=3.1.0)", "toml (>=0.10.1)", "typer (<0.5.0)"] +quality = ["ruff (>=0.3.0)"] +s3 = ["s3fs"] +tensorflow = ["tensorflow (>=2.2.0,!=2.6.0,!=2.6.1) ; sys_platform != \"darwin\" or platform_machine != \"arm64\"", "tensorflow-macos ; sys_platform == \"darwin\" and platform_machine == \"arm64\""] +tensorflow-gpu = ["tensorflow-gpu (>=2.2.0,!=2.6.0,!=2.6.1)"] +tests = ["Pillow (>=6.2.1)", "absl-py", "apache-beam (>=2.26.0) ; sys_platform != \"win32\" and python_version < \"3.10\"", "elasticsearch (<8.0.0)", "faiss-cpu (>=1.6.4)", "jax (>=0.3.14) ; sys_platform != \"win32\"", "jaxlib (>=0.3.14) ; sys_platform != \"win32\"", "joblib (<1.3.0)", "joblibspark", "librosa", "lz4", "py7zr", "pyspark (>=3.4)", "pytest", "pytest-datadir", "pytest-xdist", "rarfile (>=4.0)", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "sqlalchemy", "tensorflow (>=2.3,!=2.6.0,!=2.6.1) ; sys_platform != \"darwin\" or platform_machine != \"arm64\"", "tensorflow-macos ; sys_platform == \"darwin\" and platform_machine == \"arm64\"", "tiktoken", "torch (>=2.0.0)", "transformers", "typing-extensions (>=4.6.1)", "zstandard"] +torch = ["torch"] +vision = ["Pillow (>=6.2.1)"] + +[[package]] +name = "dbutils" +version = "1.4" +description = "Database connections for multi-threaded environments." +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "DBUtils-1.4.tar.gz", hash = "sha256:add830a239114ba3735e8367cc54abdcfe58bf66187c3744b8eefcee537a0a97"}, +] + +[[package]] +name = "deprecated" +version = "1.2.18" +description = "Python @deprecated decorator to deprecate old python classes, functions or methods." +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7" +groups = ["main"] +files = [ + {file = "Deprecated-1.2.18-py2.py3-none-any.whl", hash = "sha256:bd5011788200372a32418f888e326a09ff80d0214bd961147cfed01b5c018eec"}, + {file = "deprecated-1.2.18.tar.gz", hash = "sha256:422b6f6d859da6f2ef57857761bfb392480502a64c3028ca9bbe86085d72115d"}, +] + +[package.dependencies] +wrapt = ">=1.10,<2" + +[package.extras] +dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "setuptools ; python_version >= \"3.12\"", "tox"] + +[[package]] +name = "dill" +version = "0.3.8" +description = "serialize all of Python" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "dill-0.3.8-py3-none-any.whl", hash = "sha256:c36ca9ffb54365bdd2f8eb3eff7d2a21237f8452b57ace88b1ac615b7e815bd7"}, + {file = "dill-0.3.8.tar.gz", hash = "sha256:3ebe3c479ad625c4553aca177444d89b486b1d84982eeacded644afc0cf797ca"}, +] + +[package.extras] +graph = ["objgraph (>=1.7.2)"] +profile = ["gprof2dot (>=2022.7.29)"] + +[[package]] +name = "distro" +version = "1.9.0" +description = "Distro - an OS platform information API" +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2"}, + {file = "distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed"}, +] + +[[package]] +name = "durationpy" +version = "0.10" +description = "Module for converting between datetime.timedelta and Go's Duration strings." 
+optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "durationpy-0.10-py3-none-any.whl", hash = "sha256:3b41e1b601234296b4fb368338fdcd3e13e0b4fb5b67345948f4f2bf9868b286"}, + {file = "durationpy-0.10.tar.gz", hash = "sha256:1fa6893409a6e739c9c72334fc65cca1f355dbdd93405d30f726deb5bde42fba"}, +] + +[[package]] +name = "einops" +version = "0.8.1" +description = "A new flavour of deep learning operations" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "einops-0.8.1-py3-none-any.whl", hash = "sha256:919387eb55330f5757c6bea9165c5ff5cfe63a642682ea788a6d472576d81737"}, + {file = "einops-0.8.1.tar.gz", hash = "sha256:de5d960a7a761225532e0f1959e5315ebeafc0cd43394732f103ca44b9837e84"}, +] + +[[package]] +name = "elasticsearch" +version = "7.10.0" +description = "Python client for Elasticsearch" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4" +groups = ["main"] +files = [ + {file = "elasticsearch-7.10.0-py2.py3-none-any.whl", hash = "sha256:9a21bfa7dc6a0b0dc142088bd653d8ce5ab284b4f7a3ded716185adf5276a7fe"}, + {file = "elasticsearch-7.10.0.tar.gz", hash = "sha256:9053ca99bc9db84f5d80e124a79a32dfa0f7079b2112b546a03241c0dbeda36d"}, +] + +[package.dependencies] +certifi = "*" +urllib3 = ">=1.21.1,<2" + +[package.extras] +async = ["aiohttp (>=3,<4)"] +develop = ["black", "coverage", "jinja2", "mock", "pytest", "pytest-cov", "pyyaml", "requests (>=2.0.0,<3.0.0)", "sphinx (<1.7)", "sphinx-rtd-theme"] +docs = ["sphinx (<1.7)", "sphinx-rtd-theme"] +requests = ["requests (>=2.4.0,<3.0.0)"] + +[[package]] +name = "environs" +version = "9.5.0" +description = "simplified environment variable parsing" +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "environs-9.5.0-py2.py3-none-any.whl", hash = "sha256:1e549569a3de49c05f856f40bce86979e7d5ffbbc4398e7f338574c220189124"}, + {file = "environs-9.5.0.tar.gz", hash = "sha256:a76307b36fbe856bdca7ee9161e6c466fd7fcffc297109a118c59b54e27e30c9"}, +] + +[package.dependencies] +marshmallow = ">=3.0.0" +python-dotenv = "*" + +[package.extras] +dev = ["dj-database-url", "dj-email-url", "django-cache-url", "flake8 (==4.0.1)", "flake8-bugbear (==21.9.2)", "mypy (==0.910)", "pre-commit (>=2.4,<3.0)", "pytest", "tox"] +django = ["dj-database-url", "dj-email-url", "django-cache-url"] +lint = ["flake8 (==4.0.1)", "flake8-bugbear (==21.9.2)", "mypy (==0.910)", "pre-commit (>=2.4,<3.0)"] +tests = ["dj-database-url", "dj-email-url", "django-cache-url", "pytest"] + +[[package]] +name = "exceptiongroup" +version = "1.3.0" +description = "Backport of PEP 654 (exception groups)" +optional = false +python-versions = ">=3.7" +groups = ["main", "dev"] +markers = "python_version <= \"3.10\"" +files = [ + {file = "exceptiongroup-1.3.0-py3-none-any.whl", hash = "sha256:4d111e6e0c13d0644cad6ddaa7ed0261a0b36971f6d23e7ec9b4b9097da78a10"}, + {file = "exceptiongroup-1.3.0.tar.gz", hash = "sha256:b241f5885f560bc56a59ee63ca4c6a8bfa46ae4ad651af316d4e81817bb9fd88"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.6.0", markers = "python_version < \"3.13\""} + +[package.extras] +test = ["pytest (>=6)"] + +[[package]] +name = "faiss-cpu" +version = "1.7.4" +description = "A library for efficient similarity search and clustering of dense vectors." 
+optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "faiss-cpu-1.7.4.tar.gz", hash = "sha256:265dc31b0c079bf4433303bf6010f73922490adff9188b915e2d3f5e9c82dd0a"}, + {file = "faiss_cpu-1.7.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:50d4ebe7f1869483751c558558504f818980292a9b55be36f9a1ee1009d9a686"}, + {file = "faiss_cpu-1.7.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7b1db7fae7bd8312aeedd0c41536bcd19a6e297229e1dce526bde3a73ab8c0b5"}, + {file = "faiss_cpu-1.7.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:17b7fa7194a228a84929d9e6619d0e7dbf00cc0f717e3462253766f5e3d07de8"}, + {file = "faiss_cpu-1.7.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dca531952a2e3eac56f479ff22951af4715ee44788a3fe991d208d766d3f95f3"}, + {file = "faiss_cpu-1.7.4-cp310-cp310-win_amd64.whl", hash = "sha256:7173081d605e74766f950f2e3d6568a6f00c53f32fd9318063e96728c6c62821"}, + {file = "faiss_cpu-1.7.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d0bbd6f55d7940cc0692f79e32a58c66106c3c950cee2341b05722de9da23ea3"}, + {file = "faiss_cpu-1.7.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e13c14280376100f143767d0efe47dcb32618f69e62bbd3ea5cd38c2e1755926"}, + {file = "faiss_cpu-1.7.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c521cb8462f3b00c0c7dfb11caff492bb67816528b947be28a3b76373952c41d"}, + {file = "faiss_cpu-1.7.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afdd9fe1141117fed85961fd36ee627c83fc3b9fd47bafb52d3c849cc2f088b7"}, + {file = "faiss_cpu-1.7.4-cp311-cp311-win_amd64.whl", hash = "sha256:2ff7f57889ea31d945e3b87275be3cad5d55b6261a4e3f51c7aba304d76b81fb"}, + {file = "faiss_cpu-1.7.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:eeaf92f27d76249fb53c1adafe617b0f217ab65837acf7b4ec818511caf6e3d8"}, + {file = "faiss_cpu-1.7.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:102b1bd763e9b0c281ac312590af3eaf1c8b663ccbc1145821fe6a9f92b8eaaf"}, + {file = "faiss_cpu-1.7.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5512da6707c967310c46ff712b00418b7ae28e93cb609726136e826e9f2f14fa"}, + {file = "faiss_cpu-1.7.4-cp37-cp37m-win_amd64.whl", hash = "sha256:0c2e5b9d8c28c99f990e87379d5bbcc6c914da91ebb4250166864fd12db5755b"}, + {file = "faiss_cpu-1.7.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:43f67f325393145d360171cd98786fcea6120ce50397319afd3bb78be409fb8a"}, + {file = "faiss_cpu-1.7.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6a4e4af194b8fce74c4b770cad67ad1dd1b4673677fc169723e4c50ba5bd97a8"}, + {file = "faiss_cpu-1.7.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:31bfb7b9cffc36897ae02a983e04c09fe3b8c053110a287134751a115334a1df"}, + {file = "faiss_cpu-1.7.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:52d7de96abef2340c0d373c1f5cbc78026a3cebb0f8f3a5920920a00210ead1f"}, + {file = "faiss_cpu-1.7.4-cp38-cp38-win_amd64.whl", hash = "sha256:699feef85b23c2c729d794e26ca69bebc0bee920d676028c06fd0e0becc15c7e"}, + {file = "faiss_cpu-1.7.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:559a0133f5ed44422acb09ee1ac0acffd90c6666d1bc0d671c18f6e93ad603e2"}, + {file = "faiss_cpu-1.7.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ea1d71539fe3dc0f1bed41ef954ca701678776f231046bf0ca22ccea5cf5bef6"}, + {file = "faiss_cpu-1.7.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:12d45e0157024eb3249842163162983a1ac8b458f1a8b17bbf86f01be4585a99"}, + {file = "faiss_cpu-1.7.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2f0eab359e066d32c874f51a7d4bf6440edeec068b7fe47e6d803c73605a8b4c"}, + {file = "faiss_cpu-1.7.4-cp39-cp39-win_amd64.whl", hash = "sha256:98459ceeeb735b9df1a5b94572106ffe0a6ce740eb7e4626715dd218657bb4dc"}, +] + +[[package]] +name = "fastapi" +version = "0.115.5" +description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "fastapi-0.115.5-py3-none-any.whl", hash = "sha256:596b95adbe1474da47049e802f9a65ab2ffa9c2b07e7efee70eb8a66c9f2f796"}, + {file = "fastapi-0.115.5.tar.gz", hash = "sha256:0e7a4d0dc0d01c68df21887cce0945e72d3c48b9f4f79dfe7a7d53aa08fbb289"}, +] + +[package.dependencies] +pydantic = ">=1.7.4,<1.8 || >1.8,<1.8.1 || >1.8.1,<2.0.0 || >2.0.0,<2.0.1 || >2.0.1,<2.1.0 || >2.1.0,<3.0.0" +starlette = ">=0.40.0,<0.42.0" +typing-extensions = ">=4.8.0" + +[package.extras] +all = ["email-validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.5)", "httpx (>=0.23.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=2.11.2)", "orjson (>=3.2.1)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.7)", "pyyaml (>=5.3.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0)", "uvicorn[standard] (>=0.12.0)"] +standard = ["email-validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.5)", "httpx (>=0.23.0)", "jinja2 (>=2.11.2)", "python-multipart (>=0.0.7)", "uvicorn[standard] (>=0.12.0)"] + +[[package]] +name = "filelock" +version = "3.16.1" +description = "A platform independent file lock." +optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "python_version < \"3.10\"" +files = [ + {file = "filelock-3.16.1-py3-none-any.whl", hash = "sha256:2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0"}, + {file = "filelock-3.16.1.tar.gz", hash = "sha256:c249fbfcd5db47e5e2d6d62198e565475ee65e4831e2561c8e313fa7eb961435"}, +] + +[package.extras] +docs = ["furo (>=2024.8.6)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4.1)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.6.1)", "diff-cover (>=9.2)", "pytest (>=8.3.3)", "pytest-asyncio (>=0.24)", "pytest-cov (>=5)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.26.4)"] +typing = ["typing-extensions (>=4.12.2) ; python_version < \"3.11\""] + +[[package]] +name = "filelock" +version = "3.18.0" +description = "A platform independent file lock." +optional = false +python-versions = ">=3.9" +groups = ["main"] +markers = "python_version >= \"3.10\"" +files = [ + {file = "filelock-3.18.0-py3-none-any.whl", hash = "sha256:c401f4f8377c4464e6db25fff06205fd89bdd83b65eb0488ed1b160f780e21de"}, + {file = "filelock-3.18.0.tar.gz", hash = "sha256:adbc88eabb99d2fec8c9c1b229b171f18afa655400173ddc653d5d01501fb9f2"}, +] + +[package.extras] +docs = ["furo (>=2024.8.6)", "sphinx (>=8.1.3)", "sphinx-autodoc-typehints (>=3)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.6.10)", "diff-cover (>=9.2.1)", "pytest (>=8.3.4)", "pytest-asyncio (>=0.25.2)", "pytest-cov (>=6)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.28.1)"] +typing = ["typing-extensions (>=4.12.2) ; python_version < \"3.11\""] + +[[package]] +name = "flask" +version = "3.0.0" +description = "A simple framework for building complex web applications." 
+optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "flask-3.0.0-py3-none-any.whl", hash = "sha256:21128f47e4e3b9d597a3e8521a329bf56909b690fcc3fa3e477725aa81367638"}, + {file = "flask-3.0.0.tar.gz", hash = "sha256:cfadcdb638b609361d29ec22360d6070a77d7463dcb3ab08d2c2f2f168845f58"}, +] + +[package.dependencies] +blinker = ">=1.6.2" +click = ">=8.1.3" +importlib-metadata = {version = ">=3.6.0", markers = "python_version < \"3.10\""} +itsdangerous = ">=2.1.2" +Jinja2 = ">=3.1.2" +Werkzeug = ">=3.0.0" + +[package.extras] +async = ["asgiref (>=3.2)"] +dotenv = ["python-dotenv"] + +[[package]] +name = "flatbuffers" +version = "25.2.10" +description = "The FlatBuffers serialization format for Python" +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "flatbuffers-25.2.10-py2.py3-none-any.whl", hash = "sha256:ebba5f4d5ea615af3f7fd70fc310636fbb2bbd1f566ac0a23d98dd412de50051"}, + {file = "flatbuffers-25.2.10.tar.gz", hash = "sha256:97e451377a41262f8d9bd4295cc836133415cc03d8cb966410a4af92eb00d26e"}, +] + +[[package]] +name = "frozenlist" +version = "1.5.0" +description = "A list-like structure which implements collections.abc.MutableSequence" +optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "python_version < \"3.10\"" +files = [ + {file = "frozenlist-1.5.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:5b6a66c18b5b9dd261ca98dffcb826a525334b2f29e7caa54e182255c5f6a65a"}, + {file = "frozenlist-1.5.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d1b3eb7b05ea246510b43a7e53ed1653e55c2121019a97e60cad7efb881a97bb"}, + {file = "frozenlist-1.5.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:15538c0cbf0e4fa11d1e3a71f823524b0c46299aed6e10ebb4c2089abd8c3bec"}, + {file = "frozenlist-1.5.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e79225373c317ff1e35f210dd5f1344ff31066ba8067c307ab60254cd3a78ad5"}, + {file = "frozenlist-1.5.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9272fa73ca71266702c4c3e2d4a28553ea03418e591e377a03b8e3659d94fa76"}, + {file = "frozenlist-1.5.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:498524025a5b8ba81695761d78c8dd7382ac0b052f34e66939c42df860b8ff17"}, + {file = "frozenlist-1.5.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:92b5278ed9d50fe610185ecd23c55d8b307d75ca18e94c0e7de328089ac5dcba"}, + {file = "frozenlist-1.5.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f3c8c1dacd037df16e85227bac13cca58c30da836c6f936ba1df0c05d046d8d"}, + {file = "frozenlist-1.5.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f2ac49a9bedb996086057b75bf93538240538c6d9b38e57c82d51f75a73409d2"}, + {file = "frozenlist-1.5.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e66cc454f97053b79c2ab09c17fbe3c825ea6b4de20baf1be28919460dd7877f"}, + {file = "frozenlist-1.5.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:5a3ba5f9a0dfed20337d3e966dc359784c9f96503674c2faf015f7fe8e96798c"}, + {file = "frozenlist-1.5.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:6321899477db90bdeb9299ac3627a6a53c7399c8cd58d25da094007402b039ab"}, + {file = "frozenlist-1.5.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:76e4753701248476e6286f2ef492af900ea67d9706a0155335a40ea21bf3b2f5"}, + {file = "frozenlist-1.5.0-cp310-cp310-win32.whl", hash = 
"sha256:977701c081c0241d0955c9586ffdd9ce44f7a7795df39b9151cd9a6fd0ce4cfb"}, + {file = "frozenlist-1.5.0-cp310-cp310-win_amd64.whl", hash = "sha256:189f03b53e64144f90990d29a27ec4f7997d91ed3d01b51fa39d2dbe77540fd4"}, + {file = "frozenlist-1.5.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:fd74520371c3c4175142d02a976aee0b4cb4a7cc912a60586ffd8d5929979b30"}, + {file = "frozenlist-1.5.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2f3f7a0fbc219fb4455264cae4d9f01ad41ae6ee8524500f381de64ffaa077d5"}, + {file = "frozenlist-1.5.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f47c9c9028f55a04ac254346e92977bf0f166c483c74b4232bee19a6697e4778"}, + {file = "frozenlist-1.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0996c66760924da6e88922756d99b47512a71cfd45215f3570bf1e0b694c206a"}, + {file = "frozenlist-1.5.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a2fe128eb4edeabe11896cb6af88fca5346059f6c8d807e3b910069f39157869"}, + {file = "frozenlist-1.5.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1a8ea951bbb6cacd492e3948b8da8c502a3f814f5d20935aae74b5df2b19cf3d"}, + {file = "frozenlist-1.5.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:de537c11e4aa01d37db0d403b57bd6f0546e71a82347a97c6a9f0dcc532b3a45"}, + {file = "frozenlist-1.5.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c2623347b933fcb9095841f1cc5d4ff0b278addd743e0e966cb3d460278840d"}, + {file = "frozenlist-1.5.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:cee6798eaf8b1416ef6909b06f7dc04b60755206bddc599f52232606e18179d3"}, + {file = "frozenlist-1.5.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:f5f9da7f5dbc00a604fe74aa02ae7c98bcede8a3b8b9666f9f86fc13993bc71a"}, + {file = "frozenlist-1.5.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:90646abbc7a5d5c7c19461d2e3eeb76eb0b204919e6ece342feb6032c9325ae9"}, + {file = "frozenlist-1.5.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:bdac3c7d9b705d253b2ce370fde941836a5f8b3c5c2b8fd70940a3ea3af7f4f2"}, + {file = "frozenlist-1.5.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:03d33c2ddbc1816237a67f66336616416e2bbb6beb306e5f890f2eb22b959cdf"}, + {file = "frozenlist-1.5.0-cp311-cp311-win32.whl", hash = "sha256:237f6b23ee0f44066219dae14c70ae38a63f0440ce6750f868ee08775073f942"}, + {file = "frozenlist-1.5.0-cp311-cp311-win_amd64.whl", hash = "sha256:0cc974cc93d32c42e7b0f6cf242a6bd941c57c61b618e78b6c0a96cb72788c1d"}, + {file = "frozenlist-1.5.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:31115ba75889723431aa9a4e77d5f398f5cf976eea3bdf61749731f62d4a4a21"}, + {file = "frozenlist-1.5.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7437601c4d89d070eac8323f121fcf25f88674627505334654fd027b091db09d"}, + {file = "frozenlist-1.5.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7948140d9f8ece1745be806f2bfdf390127cf1a763b925c4a805c603df5e697e"}, + {file = "frozenlist-1.5.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:feeb64bc9bcc6b45c6311c9e9b99406660a9c05ca8a5b30d14a78555088b0b3a"}, + {file = "frozenlist-1.5.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:683173d371daad49cffb8309779e886e59c2f369430ad28fe715f66d08d4ab1a"}, + {file = "frozenlist-1.5.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:7d57d8f702221405a9d9b40f9da8ac2e4a1a8b5285aac6100f3393675f0a85ee"}, + {file = "frozenlist-1.5.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:30c72000fbcc35b129cb09956836c7d7abf78ab5416595e4857d1cae8d6251a6"}, + {file = "frozenlist-1.5.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:000a77d6034fbad9b6bb880f7ec073027908f1b40254b5d6f26210d2dab1240e"}, + {file = "frozenlist-1.5.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5d7f5a50342475962eb18b740f3beecc685a15b52c91f7d975257e13e029eca9"}, + {file = "frozenlist-1.5.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:87f724d055eb4785d9be84e9ebf0f24e392ddfad00b3fe036e43f489fafc9039"}, + {file = "frozenlist-1.5.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:6e9080bb2fb195a046e5177f10d9d82b8a204c0736a97a153c2466127de87784"}, + {file = "frozenlist-1.5.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:9b93d7aaa36c966fa42efcaf716e6b3900438632a626fb09c049f6a2f09fc631"}, + {file = "frozenlist-1.5.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:52ef692a4bc60a6dd57f507429636c2af8b6046db8b31b18dac02cbc8f507f7f"}, + {file = "frozenlist-1.5.0-cp312-cp312-win32.whl", hash = "sha256:29d94c256679247b33a3dc96cce0f93cbc69c23bf75ff715919332fdbb6a32b8"}, + {file = "frozenlist-1.5.0-cp312-cp312-win_amd64.whl", hash = "sha256:8969190d709e7c48ea386db202d708eb94bdb29207a1f269bab1196ce0dcca1f"}, + {file = "frozenlist-1.5.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:7a1a048f9215c90973402e26c01d1cff8a209e1f1b53f72b95c13db61b00f953"}, + {file = "frozenlist-1.5.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:dd47a5181ce5fcb463b5d9e17ecfdb02b678cca31280639255ce9d0e5aa67af0"}, + {file = "frozenlist-1.5.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1431d60b36d15cda188ea222033eec8e0eab488f39a272461f2e6d9e1a8e63c2"}, + {file = "frozenlist-1.5.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6482a5851f5d72767fbd0e507e80737f9c8646ae7fd303def99bfe813f76cf7f"}, + {file = "frozenlist-1.5.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:44c49271a937625619e862baacbd037a7ef86dd1ee215afc298a417ff3270608"}, + {file = "frozenlist-1.5.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:12f78f98c2f1c2429d42e6a485f433722b0061d5c0b0139efa64f396efb5886b"}, + {file = "frozenlist-1.5.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ce3aa154c452d2467487765e3adc730a8c153af77ad84096bc19ce19a2400840"}, + {file = "frozenlist-1.5.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9b7dc0c4338e6b8b091e8faf0db3168a37101943e687f373dce00959583f7439"}, + {file = "frozenlist-1.5.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:45e0896250900b5aa25180f9aec243e84e92ac84bd4a74d9ad4138ef3f5c97de"}, + {file = "frozenlist-1.5.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:561eb1c9579d495fddb6da8959fd2a1fca2c6d060d4113f5844b433fc02f2641"}, + {file = "frozenlist-1.5.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:df6e2f325bfee1f49f81aaac97d2aa757c7646534a06f8f577ce184afe2f0a9e"}, + {file = "frozenlist-1.5.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:140228863501b44b809fb39ec56b5d4071f4d0aa6d216c19cbb08b8c5a7eadb9"}, + {file = 
"frozenlist-1.5.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:7707a25d6a77f5d27ea7dc7d1fc608aa0a478193823f88511ef5e6b8a48f9d03"}, + {file = "frozenlist-1.5.0-cp313-cp313-win32.whl", hash = "sha256:31a9ac2b38ab9b5a8933b693db4939764ad3f299fcaa931a3e605bc3460e693c"}, + {file = "frozenlist-1.5.0-cp313-cp313-win_amd64.whl", hash = "sha256:11aabdd62b8b9c4b84081a3c246506d1cddd2dd93ff0ad53ede5defec7886b28"}, + {file = "frozenlist-1.5.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:dd94994fc91a6177bfaafd7d9fd951bc8689b0a98168aa26b5f543868548d3ca"}, + {file = "frozenlist-1.5.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2d0da8bbec082bf6bf18345b180958775363588678f64998c2b7609e34719b10"}, + {file = "frozenlist-1.5.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:73f2e31ea8dd7df61a359b731716018c2be196e5bb3b74ddba107f694fbd7604"}, + {file = "frozenlist-1.5.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:828afae9f17e6de596825cf4228ff28fbdf6065974e5ac1410cecc22f699d2b3"}, + {file = "frozenlist-1.5.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1577515d35ed5649d52ab4319db757bb881ce3b2b796d7283e6634d99ace307"}, + {file = "frozenlist-1.5.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2150cc6305a2c2ab33299453e2968611dacb970d2283a14955923062c8d00b10"}, + {file = "frozenlist-1.5.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a72b7a6e3cd2725eff67cd64c8f13335ee18fc3c7befc05aed043d24c7b9ccb9"}, + {file = "frozenlist-1.5.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c16d2fa63e0800723139137d667e1056bee1a1cf7965153d2d104b62855e9b99"}, + {file = "frozenlist-1.5.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:17dcc32fc7bda7ce5875435003220a457bcfa34ab7924a49a1c19f55b6ee185c"}, + {file = "frozenlist-1.5.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:97160e245ea33d8609cd2b8fd997c850b56db147a304a262abc2b3be021a9171"}, + {file = "frozenlist-1.5.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:f1e6540b7fa044eee0bb5111ada694cf3dc15f2b0347ca125ee9ca984d5e9e6e"}, + {file = "frozenlist-1.5.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:91d6c171862df0a6c61479d9724f22efb6109111017c87567cfeb7b5d1449fdf"}, + {file = "frozenlist-1.5.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c1fac3e2ace2eb1052e9f7c7db480818371134410e1f5c55d65e8f3ac6d1407e"}, + {file = "frozenlist-1.5.0-cp38-cp38-win32.whl", hash = "sha256:b97f7b575ab4a8af9b7bc1d2ef7f29d3afee2226bd03ca3875c16451ad5a7723"}, + {file = "frozenlist-1.5.0-cp38-cp38-win_amd64.whl", hash = "sha256:374ca2dabdccad8e2a76d40b1d037f5bd16824933bf7bcea3e59c891fd4a0923"}, + {file = "frozenlist-1.5.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:9bbcdfaf4af7ce002694a4e10a0159d5a8d20056a12b05b45cea944a4953f972"}, + {file = "frozenlist-1.5.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1893f948bf6681733aaccf36c5232c231e3b5166d607c5fa77773611df6dc336"}, + {file = "frozenlist-1.5.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2b5e23253bb709ef57a8e95e6ae48daa9ac5f265637529e4ce6b003a37b2621f"}, + {file = "frozenlist-1.5.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0f253985bb515ecd89629db13cb58d702035ecd8cfbca7d7a7e29a0e6d39af5f"}, + {file = "frozenlist-1.5.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:04a5c6babd5e8fb7d3c871dc8b321166b80e41b637c31a995ed844a6139942b6"}, + {file = "frozenlist-1.5.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a9fe0f1c29ba24ba6ff6abf688cb0b7cf1efab6b6aa6adc55441773c252f7411"}, + {file = "frozenlist-1.5.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:226d72559fa19babe2ccd920273e767c96a49b9d3d38badd7c91a0fdeda8ea08"}, + {file = "frozenlist-1.5.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15b731db116ab3aedec558573c1a5eec78822b32292fe4f2f0345b7f697745c2"}, + {file = "frozenlist-1.5.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:366d8f93e3edfe5a918c874702f78faac300209a4d5bf38352b2c1bdc07a766d"}, + {file = "frozenlist-1.5.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:1b96af8c582b94d381a1c1f51ffaedeb77c821c690ea5f01da3d70a487dd0a9b"}, + {file = "frozenlist-1.5.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:c03eff4a41bd4e38415cbed054bbaff4a075b093e2394b6915dca34a40d1e38b"}, + {file = "frozenlist-1.5.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:50cf5e7ee9b98f22bdecbabf3800ae78ddcc26e4a435515fc72d97903e8488e0"}, + {file = "frozenlist-1.5.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1e76bfbc72353269c44e0bc2cfe171900fbf7f722ad74c9a7b638052afe6a00c"}, + {file = "frozenlist-1.5.0-cp39-cp39-win32.whl", hash = "sha256:666534d15ba8f0fda3f53969117383d5dc021266b3c1a42c9ec4855e4b58b9d3"}, + {file = "frozenlist-1.5.0-cp39-cp39-win_amd64.whl", hash = "sha256:5c28f4b5dbef8a0d8aad0d4de24d1e9e981728628afaf4ea0792f5d0939372f0"}, + {file = "frozenlist-1.5.0-py3-none-any.whl", hash = "sha256:d994863bba198a4a518b467bb971c56e1db3f180a25c6cf7bb1949c267f748c3"}, + {file = "frozenlist-1.5.0.tar.gz", hash = "sha256:81d5af29e61b9c8348e876d442253723928dce6433e0e76cd925cd83f1b4b817"}, +] + +[[package]] +name = "frozenlist" +version = "1.7.0" +description = "A list-like structure which implements collections.abc.MutableSequence" +optional = false +python-versions = ">=3.9" +groups = ["main"] +markers = "python_version >= \"3.10\"" +files = [ + {file = "frozenlist-1.7.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cc4df77d638aa2ed703b878dd093725b72a824c3c546c076e8fdf276f78ee84a"}, + {file = "frozenlist-1.7.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:716a9973a2cc963160394f701964fe25012600f3d311f60c790400b00e568b61"}, + {file = "frozenlist-1.7.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a0fd1bad056a3600047fb9462cff4c5322cebc59ebf5d0a3725e0ee78955001d"}, + {file = "frozenlist-1.7.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3789ebc19cb811163e70fe2bd354cea097254ce6e707ae42e56f45e31e96cb8e"}, + {file = "frozenlist-1.7.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:af369aa35ee34f132fcfad5be45fbfcde0e3a5f6a1ec0712857f286b7d20cca9"}, + {file = "frozenlist-1.7.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ac64b6478722eeb7a3313d494f8342ef3478dff539d17002f849101b212ef97c"}, + {file = "frozenlist-1.7.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f89f65d85774f1797239693cef07ad4c97fdd0639544bad9ac4b869782eb1981"}, + {file = "frozenlist-1.7.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1073557c941395fdfcfac13eb2456cb8aad89f9de27bae29fabca8e563b12615"}, + {file = 
"frozenlist-1.7.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ed8d2fa095aae4bdc7fdd80351009a48d286635edffee66bf865e37a9125c50"}, + {file = "frozenlist-1.7.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:24c34bea555fe42d9f928ba0a740c553088500377448febecaa82cc3e88aa1fa"}, + {file = "frozenlist-1.7.0-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:69cac419ac6a6baad202c85aaf467b65ac860ac2e7f2ac1686dc40dbb52f6577"}, + {file = "frozenlist-1.7.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:960d67d0611f4c87da7e2ae2eacf7ea81a5be967861e0c63cf205215afbfac59"}, + {file = "frozenlist-1.7.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:41be2964bd4b15bf575e5daee5a5ce7ed3115320fb3c2b71fca05582ffa4dc9e"}, + {file = "frozenlist-1.7.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:46d84d49e00c9429238a7ce02dc0be8f6d7cd0cd405abd1bebdc991bf27c15bd"}, + {file = "frozenlist-1.7.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:15900082e886edb37480335d9d518cec978afc69ccbc30bd18610b7c1b22a718"}, + {file = "frozenlist-1.7.0-cp310-cp310-win32.whl", hash = "sha256:400ddd24ab4e55014bba442d917203c73b2846391dd42ca5e38ff52bb18c3c5e"}, + {file = "frozenlist-1.7.0-cp310-cp310-win_amd64.whl", hash = "sha256:6eb93efb8101ef39d32d50bce242c84bcbddb4f7e9febfa7b524532a239b4464"}, + {file = "frozenlist-1.7.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:aa51e147a66b2d74de1e6e2cf5921890de6b0f4820b257465101d7f37b49fb5a"}, + {file = "frozenlist-1.7.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:9b35db7ce1cd71d36ba24f80f0c9e7cff73a28d7a74e91fe83e23d27c7828750"}, + {file = "frozenlist-1.7.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:34a69a85e34ff37791e94542065c8416c1afbf820b68f720452f636d5fb990cd"}, + {file = "frozenlist-1.7.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a646531fa8d82c87fe4bb2e596f23173caec9185bfbca5d583b4ccfb95183e2"}, + {file = "frozenlist-1.7.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:79b2ffbba483f4ed36a0f236ccb85fbb16e670c9238313709638167670ba235f"}, + {file = "frozenlist-1.7.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a26f205c9ca5829cbf82bb2a84b5c36f7184c4316617d7ef1b271a56720d6b30"}, + {file = "frozenlist-1.7.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bcacfad3185a623fa11ea0e0634aac7b691aa925d50a440f39b458e41c561d98"}, + {file = "frozenlist-1.7.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:72c1b0fe8fe451b34f12dce46445ddf14bd2a5bcad7e324987194dc8e3a74c86"}, + {file = "frozenlist-1.7.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61d1a5baeaac6c0798ff6edfaeaa00e0e412d49946c53fae8d4b8e8b3566c4ae"}, + {file = "frozenlist-1.7.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:7edf5c043c062462f09b6820de9854bf28cc6cc5b6714b383149745e287181a8"}, + {file = "frozenlist-1.7.0-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:d50ac7627b3a1bd2dcef6f9da89a772694ec04d9a61b66cf87f7d9446b4a0c31"}, + {file = "frozenlist-1.7.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:ce48b2fece5aeb45265bb7a58259f45027db0abff478e3077e12b05b17fb9da7"}, + {file = "frozenlist-1.7.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:fe2365ae915a1fafd982c146754e1de6ab3478def8a59c86e1f7242d794f97d5"}, + {file 
= "frozenlist-1.7.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:45a6f2fdbd10e074e8814eb98b05292f27bad7d1883afbe009d96abdcf3bc898"}, + {file = "frozenlist-1.7.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:21884e23cffabb157a9dd7e353779077bf5b8f9a58e9b262c6caad2ef5f80a56"}, + {file = "frozenlist-1.7.0-cp311-cp311-win32.whl", hash = "sha256:284d233a8953d7b24f9159b8a3496fc1ddc00f4db99c324bd5fb5f22d8698ea7"}, + {file = "frozenlist-1.7.0-cp311-cp311-win_amd64.whl", hash = "sha256:387cbfdcde2f2353f19c2f66bbb52406d06ed77519ac7ee21be0232147c2592d"}, + {file = "frozenlist-1.7.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:3dbf9952c4bb0e90e98aec1bd992b3318685005702656bc6f67c1a32b76787f2"}, + {file = "frozenlist-1.7.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:1f5906d3359300b8a9bb194239491122e6cf1444c2efb88865426f170c262cdb"}, + {file = "frozenlist-1.7.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3dabd5a8f84573c8d10d8859a50ea2dec01eea372031929871368c09fa103478"}, + {file = "frozenlist-1.7.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa57daa5917f1738064f302bf2626281a1cb01920c32f711fbc7bc36111058a8"}, + {file = "frozenlist-1.7.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:c193dda2b6d49f4c4398962810fa7d7c78f032bf45572b3e04dd5249dff27e08"}, + {file = "frozenlist-1.7.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bfe2b675cf0aaa6d61bf8fbffd3c274b3c9b7b1623beb3809df8a81399a4a9c4"}, + {file = "frozenlist-1.7.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8fc5d5cda37f62b262405cf9652cf0856839c4be8ee41be0afe8858f17f4c94b"}, + {file = "frozenlist-1.7.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b0d5ce521d1dd7d620198829b87ea002956e4319002ef0bc8d3e6d045cb4646e"}, + {file = "frozenlist-1.7.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:488d0a7d6a0008ca0db273c542098a0fa9e7dfaa7e57f70acef43f32b3f69dca"}, + {file = "frozenlist-1.7.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:15a7eaba63983d22c54d255b854e8108e7e5f3e89f647fc854bd77a237e767df"}, + {file = "frozenlist-1.7.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:1eaa7e9c6d15df825bf255649e05bd8a74b04a4d2baa1ae46d9c2d00b2ca2cb5"}, + {file = "frozenlist-1.7.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:e4389e06714cfa9d47ab87f784a7c5be91d3934cd6e9a7b85beef808297cc025"}, + {file = "frozenlist-1.7.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:73bd45e1488c40b63fe5a7df892baf9e2a4d4bb6409a2b3b78ac1c6236178e01"}, + {file = "frozenlist-1.7.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:99886d98e1643269760e5fe0df31e5ae7050788dd288947f7f007209b8c33f08"}, + {file = "frozenlist-1.7.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:290a172aae5a4c278c6da8a96222e6337744cd9c77313efe33d5670b9f65fc43"}, + {file = "frozenlist-1.7.0-cp312-cp312-win32.whl", hash = "sha256:426c7bc70e07cfebc178bc4c2bf2d861d720c4fff172181eeb4a4c41d4ca2ad3"}, + {file = "frozenlist-1.7.0-cp312-cp312-win_amd64.whl", hash = "sha256:563b72efe5da92e02eb68c59cb37205457c977aa7a449ed1b37e6939e5c47c6a"}, + {file = "frozenlist-1.7.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ee80eeda5e2a4e660651370ebffd1286542b67e268aa1ac8d6dbe973120ef7ee"}, + {file = "frozenlist-1.7.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = 
"sha256:d1a81c85417b914139e3a9b995d4a1c84559afc839a93cf2cb7f15e6e5f6ed2d"}, + {file = "frozenlist-1.7.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:cbb65198a9132ebc334f237d7b0df163e4de83fb4f2bdfe46c1e654bdb0c5d43"}, + {file = "frozenlist-1.7.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dab46c723eeb2c255a64f9dc05b8dd601fde66d6b19cdb82b2e09cc6ff8d8b5d"}, + {file = "frozenlist-1.7.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:6aeac207a759d0dedd2e40745575ae32ab30926ff4fa49b1635def65806fddee"}, + {file = "frozenlist-1.7.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bd8c4e58ad14b4fa7802b8be49d47993182fdd4023393899632c88fd8cd994eb"}, + {file = "frozenlist-1.7.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:04fb24d104f425da3540ed83cbfc31388a586a7696142004c577fa61c6298c3f"}, + {file = "frozenlist-1.7.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6a5c505156368e4ea6b53b5ac23c92d7edc864537ff911d2fb24c140bb175e60"}, + {file = "frozenlist-1.7.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8bd7eb96a675f18aa5c553eb7ddc24a43c8c18f22e1f9925528128c052cdbe00"}, + {file = "frozenlist-1.7.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:05579bf020096fe05a764f1f84cd104a12f78eaab68842d036772dc6d4870b4b"}, + {file = "frozenlist-1.7.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:376b6222d114e97eeec13d46c486facd41d4f43bab626b7c3f6a8b4e81a5192c"}, + {file = "frozenlist-1.7.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:0aa7e176ebe115379b5b1c95b4096fb1c17cce0847402e227e712c27bdb5a949"}, + {file = "frozenlist-1.7.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:3fbba20e662b9c2130dc771e332a99eff5da078b2b2648153a40669a6d0e36ca"}, + {file = "frozenlist-1.7.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:f3f4410a0a601d349dd406b5713fec59b4cee7e71678d5b17edda7f4655a940b"}, + {file = "frozenlist-1.7.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e2cdfaaec6a2f9327bf43c933c0319a7c429058e8537c508964a133dffee412e"}, + {file = "frozenlist-1.7.0-cp313-cp313-win32.whl", hash = "sha256:5fc4df05a6591c7768459caba1b342d9ec23fa16195e744939ba5914596ae3e1"}, + {file = "frozenlist-1.7.0-cp313-cp313-win_amd64.whl", hash = "sha256:52109052b9791a3e6b5d1b65f4b909703984b770694d3eb64fad124c835d7cba"}, + {file = "frozenlist-1.7.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:a6f86e4193bb0e235ef6ce3dde5cbabed887e0b11f516ce8a0f4d3b33078ec2d"}, + {file = "frozenlist-1.7.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:82d664628865abeb32d90ae497fb93df398a69bb3434463d172b80fc25b0dd7d"}, + {file = "frozenlist-1.7.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:912a7e8375a1c9a68325a902f3953191b7b292aa3c3fb0d71a216221deca460b"}, + {file = "frozenlist-1.7.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9537c2777167488d539bc5de2ad262efc44388230e5118868e172dd4a552b146"}, + {file = "frozenlist-1.7.0-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:f34560fb1b4c3e30ba35fa9a13894ba39e5acfc5f60f57d8accde65f46cc5e74"}, + {file = "frozenlist-1.7.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:acd03d224b0175f5a850edc104ac19040d35419eddad04e7cf2d5986d98427f1"}, + {file = 
"frozenlist-1.7.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2038310bc582f3d6a09b3816ab01737d60bf7b1ec70f5356b09e84fb7408ab1"}, + {file = "frozenlist-1.7.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b8c05e4c8e5f36e5e088caa1bf78a687528f83c043706640a92cb76cd6999384"}, + {file = "frozenlist-1.7.0-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:765bb588c86e47d0b68f23c1bee323d4b703218037765dcf3f25c838c6fecceb"}, + {file = "frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:32dc2e08c67d86d0969714dd484fd60ff08ff81d1a1e40a77dd34a387e6ebc0c"}, + {file = "frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:c0303e597eb5a5321b4de9c68e9845ac8f290d2ab3f3e2c864437d3c5a30cd65"}, + {file = "frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:a47f2abb4e29b3a8d0b530f7c3598badc6b134562b1a5caee867f7c62fee51e3"}, + {file = "frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:3d688126c242a6fabbd92e02633414d40f50bb6002fa4cf995a1d18051525657"}, + {file = "frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:4e7e9652b3d367c7bd449a727dc79d5043f48b88d0cbfd4f9f1060cf2b414104"}, + {file = "frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:1a85e345b4c43db8b842cab1feb41be5cc0b10a1830e6295b69d7310f99becaf"}, + {file = "frozenlist-1.7.0-cp313-cp313t-win32.whl", hash = "sha256:3a14027124ddb70dfcee5148979998066897e79f89f64b13328595c4bdf77c81"}, + {file = "frozenlist-1.7.0-cp313-cp313t-win_amd64.whl", hash = "sha256:3bf8010d71d4507775f658e9823210b7427be36625b387221642725b515dcf3e"}, + {file = "frozenlist-1.7.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:cea3dbd15aea1341ea2de490574a4a37ca080b2ae24e4b4f4b51b9057b4c3630"}, + {file = "frozenlist-1.7.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7d536ee086b23fecc36c2073c371572374ff50ef4db515e4e503925361c24f71"}, + {file = "frozenlist-1.7.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:dfcebf56f703cb2e346315431699f00db126d158455e513bd14089d992101e44"}, + {file = "frozenlist-1.7.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:974c5336e61d6e7eb1ea5b929cb645e882aadab0095c5a6974a111e6479f8878"}, + {file = "frozenlist-1.7.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:c70db4a0ab5ab20878432c40563573229a7ed9241506181bba12f6b7d0dc41cb"}, + {file = "frozenlist-1.7.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1137b78384eebaf70560a36b7b229f752fb64d463d38d1304939984d5cb887b6"}, + {file = "frozenlist-1.7.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e793a9f01b3e8b5c0bc646fb59140ce0efcc580d22a3468d70766091beb81b35"}, + {file = "frozenlist-1.7.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:74739ba8e4e38221d2c5c03d90a7e542cb8ad681915f4ca8f68d04f810ee0a87"}, + {file = "frozenlist-1.7.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1e63344c4e929b1a01e29bc184bbb5fd82954869033765bfe8d65d09e336a677"}, + {file = "frozenlist-1.7.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2ea2a7369eb76de2217a842f22087913cdf75f63cf1307b9024ab82dfb525938"}, + {file = "frozenlist-1.7.0-cp39-cp39-musllinux_1_2_armv7l.whl", hash = 
"sha256:836b42f472a0e006e02499cef9352ce8097f33df43baaba3e0a28a964c26c7d2"}, + {file = "frozenlist-1.7.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:e22b9a99741294b2571667c07d9f8cceec07cb92aae5ccda39ea1b6052ed4319"}, + {file = "frozenlist-1.7.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:9a19e85cc503d958abe5218953df722748d87172f71b73cf3c9257a91b999890"}, + {file = "frozenlist-1.7.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:f22dac33bb3ee8fe3e013aa7b91dc12f60d61d05b7fe32191ffa84c3aafe77bd"}, + {file = "frozenlist-1.7.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:9ccec739a99e4ccf664ea0775149f2749b8a6418eb5b8384b4dc0a7d15d304cb"}, + {file = "frozenlist-1.7.0-cp39-cp39-win32.whl", hash = "sha256:b3950f11058310008a87757f3eee16a8e1ca97979833239439586857bc25482e"}, + {file = "frozenlist-1.7.0-cp39-cp39-win_amd64.whl", hash = "sha256:43a82fce6769c70f2f5a06248b614a7d268080a9d20f7457ef10ecee5af82b63"}, + {file = "frozenlist-1.7.0-py3-none-any.whl", hash = "sha256:9a5af342e34f7e97caf8c995864c7a396418ae2859cc6fdf1b1073020d516a7e"}, + {file = "frozenlist-1.7.0.tar.gz", hash = "sha256:2e310d81923c2437ea8670467121cc3e9b0f76d3043cc1d2331d56c7fb7a3a8f"}, +] + +[[package]] +name = "fsspec" +version = "2024.2.0" +description = "File-system specification" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "fsspec-2024.2.0-py3-none-any.whl", hash = "sha256:817f969556fa5916bc682e02ca2045f96ff7f586d45110fcb76022063ad2c7d8"}, + {file = "fsspec-2024.2.0.tar.gz", hash = "sha256:b6ad1a679f760dda52b1168c859d01b7b80648ea6f7f7c7f5a8a91dc3f3ecb84"}, +] + +[package.dependencies] +aiohttp = {version = "<4.0.0a0 || >4.0.0a0,<4.0.0a1 || >4.0.0a1", optional = true, markers = "extra == \"http\""} + +[package.extras] +abfs = ["adlfs"] +adl = ["adlfs"] +arrow = ["pyarrow (>=1)"] +dask = ["dask", "distributed"] +devel = ["pytest", "pytest-cov"] +dropbox = ["dropbox", "dropboxdrivefs", "requests"] +full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "dask", "distributed", "dropbox", "dropboxdrivefs", "fusepy", "gcsfs", "libarchive-c", "ocifs", "panel", "paramiko", "pyarrow (>=1)", "pygit2", "requests", "s3fs", "smbprotocol", "tqdm"] +fuse = ["fusepy"] +gcs = ["gcsfs"] +git = ["pygit2"] +github = ["requests"] +gs = ["gcsfs"] +gui = ["panel"] +hdfs = ["pyarrow (>=1)"] +http = ["aiohttp (!=4.0.0a0,!=4.0.0a1)"] +libarchive = ["libarchive-c"] +oci = ["ocifs"] +s3 = ["s3fs"] +sftp = ["paramiko"] +smb = ["smbprotocol"] +ssh = ["paramiko"] +tqdm = ["tqdm"] + +[[package]] +name = "gast" +version = "0.6.0" +description = "Python AST that abstracts the underlying Python version" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7" +groups = ["main"] +files = [ + {file = "gast-0.6.0-py3-none-any.whl", hash = "sha256:52b182313f7330389f72b069ba00f174cfe2a06411099547288839c6cbafbd54"}, + {file = "gast-0.6.0.tar.gz", hash = "sha256:88fc5300d32c7ac6ca7b515310862f71e6fdf2c029bbec7c66c0f5dd47b6b1fb"}, +] + +[[package]] +name = "google-auth" +version = "2.40.3" +description = "Google Authentication Library" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "google_auth-2.40.3-py2.py3-none-any.whl", hash = "sha256:1370d4593e86213563547f97a92752fc658456fe4514c809544f330fed45a7ca"}, + {file = "google_auth-2.40.3.tar.gz", hash = "sha256:500c3a29adedeb36ea9cf24b8d10858e152f2412e3ca37829b3fa18e33d63b77"}, +] + +[package.dependencies] +cachetools = ">=2.0.0,<6.0" +pyasn1-modules = ">=0.2.1" +rsa = ">=3.1.4,<5" + +[package.extras] 
+aiohttp = ["aiohttp (>=3.6.2,<4.0.0)", "requests (>=2.20.0,<3.0.0)"] +enterprise-cert = ["cryptography", "pyopenssl"] +pyjwt = ["cryptography (<39.0.0) ; python_version < \"3.8\"", "cryptography (>=38.0.3)", "pyjwt (>=2.0)"] +pyopenssl = ["cryptography (<39.0.0) ; python_version < \"3.8\"", "cryptography (>=38.0.3)", "pyopenssl (>=20.0.0)"] +reauth = ["pyu2f (>=0.1.5)"] +requests = ["requests (>=2.20.0,<3.0.0)"] +testing = ["aiohttp (<3.10.0)", "aiohttp (>=3.6.2,<4.0.0)", "aioresponses", "cryptography (<39.0.0) ; python_version < \"3.8\"", "cryptography (>=38.0.3)", "flask", "freezegun", "grpcio", "mock", "oauth2client", "packaging", "pyjwt (>=2.0)", "pyopenssl (<24.3.0)", "pyopenssl (>=20.0.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-localserver", "pyu2f (>=0.1.5)", "requests (>=2.20.0,<3.0.0)", "responses", "urllib3"] +urllib3 = ["packaging", "urllib3"] + +[[package]] +name = "googleapis-common-protos" +version = "1.70.0" +description = "Common protobufs used in Google APIs" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "googleapis_common_protos-1.70.0-py3-none-any.whl", hash = "sha256:b8bfcca8c25a2bb253e0e0b0adaf8c00773e5e6af6fd92397576680b807e0fd8"}, + {file = "googleapis_common_protos-1.70.0.tar.gz", hash = "sha256:0e1b44e0ea153e6594f9f394fef15193a68aaaea2d843f83e2742717ca753257"}, +] + +[package.dependencies] +protobuf = ">=3.20.2,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<7.0.0" + +[package.extras] +grpc = ["grpcio (>=1.44.0,<2.0.0)"] + +[[package]] +name = "graphlib-backport" +version = "1.1.0" +description = "Backport of the Python 3.9 graphlib module for Python 3.6+" +optional = false +python-versions = ">=3.6,<4.0" +groups = ["main"] +markers = "python_version == \"3.8\"" +files = [ + {file = "graphlib_backport-1.1.0-py3-none-any.whl", hash = "sha256:eccacf9f2126cdf89ce32a6018c88e1ecd3e4898a07568add6e1907a439055ba"}, + {file = "graphlib_backport-1.1.0.tar.gz", hash = "sha256:00a7888b21e5393064a133209cb5d3b3ef0a2096cf023914c9d778dff5644125"}, +] + +[[package]] +name = "grpcio" +version = "1.58.0" +description = "HTTP/2-based RPC framework" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "grpcio-1.58.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:3e6bebf1dfdbeb22afd95650e4f019219fef3ab86d3fca8ebade52e4bc39389a"}, + {file = "grpcio-1.58.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:cde11577d5b6fd73a00e6bfa3cf5f428f3f33c2d2878982369b5372bbc4acc60"}, + {file = "grpcio-1.58.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:a2d67ff99e70e86b2be46c1017ae40b4840d09467d5455b2708de6d4c127e143"}, + {file = "grpcio-1.58.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1ed979b273a81de36fc9c6716d9fb09dd3443efa18dcc8652501df11da9583e9"}, + {file = "grpcio-1.58.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:458899d2ebd55d5ca2350fd3826dfd8fcb11fe0f79828ae75e2b1e6051d50a29"}, + {file = "grpcio-1.58.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:bc7ffef430b80345729ff0a6825e9d96ac87efe39216e87ac58c6c4ef400de93"}, + {file = "grpcio-1.58.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:5b23d75e5173faa3d1296a7bedffb25afd2fddb607ef292dfc651490c7b53c3d"}, + {file = "grpcio-1.58.0-cp310-cp310-win32.whl", hash = "sha256:fad9295fe02455d4f158ad72c90ef8b4bcaadfdb5efb5795f7ab0786ad67dd58"}, + {file = "grpcio-1.58.0-cp310-cp310-win_amd64.whl", hash = 
"sha256:bc325fed4d074367bebd465a20763586e5e1ed5b943e9d8bc7c162b1f44fd602"}, + {file = "grpcio-1.58.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:652978551af02373a5a313e07bfef368f406b5929cf2d50fa7e4027f913dbdb4"}, + {file = "grpcio-1.58.0-cp311-cp311-macosx_10_10_universal2.whl", hash = "sha256:9f13a171281ebb4d7b1ba9f06574bce2455dcd3f2f6d1fbe0fd0d84615c74045"}, + {file = "grpcio-1.58.0-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:8774219e21b05f750eef8adc416e9431cf31b98f6ce9def288e4cea1548cbd22"}, + {file = "grpcio-1.58.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:09206106848462763f7f273ca93d2d2d4d26cab475089e0de830bb76be04e9e8"}, + {file = "grpcio-1.58.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:62831d5e251dd7561d9d9e83a0b8655084b2a1f8ea91e4bd6b3cedfefd32c9d2"}, + {file = "grpcio-1.58.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:212f38c6a156862098f6bdc9a79bf850760a751d259d8f8f249fc6d645105855"}, + {file = "grpcio-1.58.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4b12754af201bb993e6e2efd7812085ddaaef21d0a6f0ff128b97de1ef55aa4a"}, + {file = "grpcio-1.58.0-cp311-cp311-win32.whl", hash = "sha256:3886b4d56bd4afeac518dbc05933926198aa967a7d1d237a318e6fbc47141577"}, + {file = "grpcio-1.58.0-cp311-cp311-win_amd64.whl", hash = "sha256:002f228d197fea12797a14e152447044e14fb4fdb2eb5d6cfa496f29ddbf79ef"}, + {file = "grpcio-1.58.0-cp37-cp37m-linux_armv7l.whl", hash = "sha256:b5e8db0aff0a4819946215f156bd722b6f6c8320eb8419567ffc74850c9fd205"}, + {file = "grpcio-1.58.0-cp37-cp37m-macosx_10_10_universal2.whl", hash = "sha256:201e550b7e2ede113b63e718e7ece93cef5b0fbf3c45e8fe4541a5a4305acd15"}, + {file = "grpcio-1.58.0-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:d79b660681eb9bc66cc7cbf78d1b1b9e335ee56f6ea1755d34a31108b80bd3c8"}, + {file = "grpcio-1.58.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2ef8d4a76d2c7d8065aba829f8d0bc0055495c998dce1964ca5b302d02514fb3"}, + {file = "grpcio-1.58.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6cba491c638c76d3dc6c191d9c75041ca5b8f5c6de4b8327ecdcab527f130bb4"}, + {file = "grpcio-1.58.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:6801ff6652ecd2aae08ef994a3e49ff53de29e69e9cd0fd604a79ae4e545a95c"}, + {file = "grpcio-1.58.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:24edec346e69e672daf12b2c88e95c6f737f3792d08866101d8c5f34370c54fd"}, + {file = "grpcio-1.58.0-cp37-cp37m-win_amd64.whl", hash = "sha256:7e473a7abad9af48e3ab5f3b5d237d18208024d28ead65a459bd720401bd2f8f"}, + {file = "grpcio-1.58.0-cp38-cp38-linux_armv7l.whl", hash = "sha256:4891bbb4bba58acd1d620759b3be11245bfe715eb67a4864c8937b855b7ed7fa"}, + {file = "grpcio-1.58.0-cp38-cp38-macosx_10_10_universal2.whl", hash = "sha256:e9f995a8a421405958ff30599b4d0eec244f28edc760de82f0412c71c61763d2"}, + {file = "grpcio-1.58.0-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:2f85f87e2f087d9f632c085b37440a3169fda9cdde80cb84057c2fc292f8cbdf"}, + {file = "grpcio-1.58.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb6b92036ff312d5b4182fa72e8735d17aceca74d0d908a7f08e375456f03e07"}, + {file = "grpcio-1.58.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d81c2b2b24c32139dd2536972f1060678c6b9fbd106842a9fcdecf07b233eccd"}, + {file = "grpcio-1.58.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:fbcecb6aedd5c1891db1d70efbfbdc126c986645b5dd616a045c07d6bd2dfa86"}, + {file = 
"grpcio-1.58.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:92ae871a902cf19833328bd6498ec007b265aabf2fda845ab5bd10abcaf4c8c6"}, + {file = "grpcio-1.58.0-cp38-cp38-win32.whl", hash = "sha256:dc72e04620d49d3007771c0e0348deb23ca341c0245d610605dddb4ac65a37cb"}, + {file = "grpcio-1.58.0-cp38-cp38-win_amd64.whl", hash = "sha256:1c1c5238c6072470c7f1614bf7c774ffde6b346a100521de9ce791d1e4453afe"}, + {file = "grpcio-1.58.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:fe643af248442221db027da43ed43e53b73e11f40c9043738de9a2b4b6ca7697"}, + {file = "grpcio-1.58.0-cp39-cp39-macosx_10_10_universal2.whl", hash = "sha256:128eb1f8e70676d05b1b0c8e6600320fc222b3f8c985a92224248b1367122188"}, + {file = "grpcio-1.58.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:039003a5e0ae7d41c86c768ef8b3ee2c558aa0a23cf04bf3c23567f37befa092"}, + {file = "grpcio-1.58.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8f061722cad3f9aabb3fbb27f3484ec9d4667b7328d1a7800c3c691a98f16bb0"}, + {file = "grpcio-1.58.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba0af11938acf8cd4cf815c46156bcde36fa5850518120920d52620cc3ec1830"}, + {file = "grpcio-1.58.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:d4cef77ad2fed42b1ba9143465856d7e737279854e444925d5ba45fc1f3ba727"}, + {file = "grpcio-1.58.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:24765a627eb4d9288ace32d5104161c3654128fe27f2808ecd6e9b0cfa7fc8b9"}, + {file = "grpcio-1.58.0-cp39-cp39-win32.whl", hash = "sha256:f0241f7eb0d2303a545136c59bc565a35c4fc3b924ccbd69cb482f4828d6f31c"}, + {file = "grpcio-1.58.0-cp39-cp39-win_amd64.whl", hash = "sha256:dcfba7befe3a55dab6fe1eb7fc9359dc0c7f7272b30a70ae0af5d5b063842f28"}, + {file = "grpcio-1.58.0.tar.gz", hash = "sha256:532410c51ccd851b706d1fbc00a87be0f5312bd6f8e5dbf89d4e99c7f79d7499"}, +] + +[package.extras] +protobuf = ["grpcio-tools (>=1.58.0)"] + +[[package]] +name = "h11" +version = "0.16.0" +description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86"}, + {file = "h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1"}, +] + +[[package]] +name = "hf-xet" +version = "1.1.5" +description = "Fast transfer of large files with the Hugging Face Hub." 
+optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"arm64\" or platform_machine == \"aarch64\"" +files = [ + {file = "hf_xet-1.1.5-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:f52c2fa3635b8c37c7764d8796dfa72706cc4eded19d638331161e82b0792e23"}, + {file = "hf_xet-1.1.5-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:9fa6e3ee5d61912c4a113e0708eaaef987047616465ac7aa30f7121a48fc1af8"}, + {file = "hf_xet-1.1.5-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc874b5c843e642f45fd85cda1ce599e123308ad2901ead23d3510a47ff506d1"}, + {file = "hf_xet-1.1.5-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:dbba1660e5d810bd0ea77c511a99e9242d920790d0e63c0e4673ed36c4022d18"}, + {file = "hf_xet-1.1.5-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ab34c4c3104133c495785d5d8bba3b1efc99de52c02e759cf711a91fd39d3a14"}, + {file = "hf_xet-1.1.5-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:83088ecea236d5113de478acb2339f92c95b4fb0462acaa30621fac02f5a534a"}, + {file = "hf_xet-1.1.5-cp37-abi3-win_amd64.whl", hash = "sha256:73e167d9807d166596b4b2f0b585c6d5bd84a26dea32843665a8b58f6edba245"}, + {file = "hf_xet-1.1.5.tar.gz", hash = "sha256:69ebbcfd9ec44fdc2af73441619eeb06b94ee34511bbcf57cd423820090f5694"}, +] + +[package.extras] +tests = ["pytest"] + +[[package]] +name = "httpcore" +version = "1.0.9" +description = "A minimal low-level HTTP client." +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55"}, + {file = "httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8"}, +] + +[package.dependencies] +certifi = "*" +h11 = ">=0.16" + +[package.extras] +asyncio = ["anyio (>=4.0,<5.0)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] +trio = ["trio (>=0.22.0,<1.0)"] + +[[package]] +name = "httptools" +version = "0.6.4" +description = "A collection of framework independent HTTP protocol utils." 
+optional = false +python-versions = ">=3.8.0" +groups = ["main"] +files = [ + {file = "httptools-0.6.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3c73ce323711a6ffb0d247dcd5a550b8babf0f757e86a52558fe5b86d6fefcc0"}, + {file = "httptools-0.6.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:345c288418f0944a6fe67be8e6afa9262b18c7626c3ef3c28adc5eabc06a68da"}, + {file = "httptools-0.6.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:deee0e3343f98ee8047e9f4c5bc7cedbf69f5734454a94c38ee829fb2d5fa3c1"}, + {file = "httptools-0.6.4-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca80b7485c76f768a3bc83ea58373f8db7b015551117375e4918e2aa77ea9b50"}, + {file = "httptools-0.6.4-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:90d96a385fa941283ebd231464045187a31ad932ebfa541be8edf5b3c2328959"}, + {file = "httptools-0.6.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:59e724f8b332319e2875efd360e61ac07f33b492889284a3e05e6d13746876f4"}, + {file = "httptools-0.6.4-cp310-cp310-win_amd64.whl", hash = "sha256:c26f313951f6e26147833fc923f78f95604bbec812a43e5ee37f26dc9e5a686c"}, + {file = "httptools-0.6.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f47f8ed67cc0ff862b84a1189831d1d33c963fb3ce1ee0c65d3b0cbe7b711069"}, + {file = "httptools-0.6.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:0614154d5454c21b6410fdf5262b4a3ddb0f53f1e1721cfd59d55f32138c578a"}, + {file = "httptools-0.6.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f8787367fbdfccae38e35abf7641dafc5310310a5987b689f4c32cc8cc3ee975"}, + {file = "httptools-0.6.4-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40b0f7fe4fd38e6a507bdb751db0379df1e99120c65fbdc8ee6c1d044897a636"}, + {file = "httptools-0.6.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:40a5ec98d3f49904b9fe36827dcf1aadfef3b89e2bd05b0e35e94f97c2b14721"}, + {file = "httptools-0.6.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:dacdd3d10ea1b4ca9df97a0a303cbacafc04b5cd375fa98732678151643d4988"}, + {file = "httptools-0.6.4-cp311-cp311-win_amd64.whl", hash = "sha256:288cd628406cc53f9a541cfaf06041b4c71d751856bab45e3702191f931ccd17"}, + {file = "httptools-0.6.4-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:df017d6c780287d5c80601dafa31f17bddb170232d85c066604d8558683711a2"}, + {file = "httptools-0.6.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:85071a1e8c2d051b507161f6c3e26155b5c790e4e28d7f236422dbacc2a9cc44"}, + {file = "httptools-0.6.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69422b7f458c5af875922cdb5bd586cc1f1033295aa9ff63ee196a87519ac8e1"}, + {file = "httptools-0.6.4-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:16e603a3bff50db08cd578d54f07032ca1631450ceb972c2f834c2b860c28ea2"}, + {file = "httptools-0.6.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ec4f178901fa1834d4a060320d2f3abc5c9e39766953d038f1458cb885f47e81"}, + {file = "httptools-0.6.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:f9eb89ecf8b290f2e293325c646a211ff1c2493222798bb80a530c5e7502494f"}, + {file = "httptools-0.6.4-cp312-cp312-win_amd64.whl", hash = "sha256:db78cb9ca56b59b016e64b6031eda5653be0589dba2b1b43453f6e8b405a0970"}, + {file = "httptools-0.6.4-cp313-cp313-macosx_10_13_universal2.whl", hash = 
"sha256:ade273d7e767d5fae13fa637f4d53b6e961fb7fd93c7797562663f0171c26660"}, + {file = "httptools-0.6.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:856f4bc0478ae143bad54a4242fccb1f3f86a6e1be5548fecfd4102061b3a083"}, + {file = "httptools-0.6.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:322d20ea9cdd1fa98bd6a74b77e2ec5b818abdc3d36695ab402a0de8ef2865a3"}, + {file = "httptools-0.6.4-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4d87b29bd4486c0093fc64dea80231f7c7f7eb4dc70ae394d70a495ab8436071"}, + {file = "httptools-0.6.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:342dd6946aa6bda4b8f18c734576106b8a31f2fe31492881a9a160ec84ff4bd5"}, + {file = "httptools-0.6.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4b36913ba52008249223042dca46e69967985fb4051951f94357ea681e1f5dc0"}, + {file = "httptools-0.6.4-cp313-cp313-win_amd64.whl", hash = "sha256:28908df1b9bb8187393d5b5db91435ccc9c8e891657f9cbb42a2541b44c82fc8"}, + {file = "httptools-0.6.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:d3f0d369e7ffbe59c4b6116a44d6a8eb4783aae027f2c0b366cf0aa964185dba"}, + {file = "httptools-0.6.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:94978a49b8f4569ad607cd4946b759d90b285e39c0d4640c6b36ca7a3ddf2efc"}, + {file = "httptools-0.6.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40dc6a8e399e15ea525305a2ddba998b0af5caa2566bcd79dcbe8948181eeaff"}, + {file = "httptools-0.6.4-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ab9ba8dcf59de5181f6be44a77458e45a578fc99c31510b8c65b7d5acc3cf490"}, + {file = "httptools-0.6.4-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:fc411e1c0a7dcd2f902c7c48cf079947a7e65b5485dea9decb82b9105ca71a43"}, + {file = "httptools-0.6.4-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:d54efd20338ac52ba31e7da78e4a72570cf729fac82bc31ff9199bedf1dc7440"}, + {file = "httptools-0.6.4-cp38-cp38-win_amd64.whl", hash = "sha256:df959752a0c2748a65ab5387d08287abf6779ae9165916fe053e68ae1fbdc47f"}, + {file = "httptools-0.6.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:85797e37e8eeaa5439d33e556662cc370e474445d5fab24dcadc65a8ffb04003"}, + {file = "httptools-0.6.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:db353d22843cf1028f43c3651581e4bb49374d85692a85f95f7b9a130e1b2cab"}, + {file = "httptools-0.6.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1ffd262a73d7c28424252381a5b854c19d9de5f56f075445d33919a637e3547"}, + {file = "httptools-0.6.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:703c346571fa50d2e9856a37d7cd9435a25e7fd15e236c397bf224afaa355fe9"}, + {file = "httptools-0.6.4-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:aafe0f1918ed07b67c1e838f950b1c1fabc683030477e60b335649b8020e1076"}, + {file = "httptools-0.6.4-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:0e563e54979e97b6d13f1bbc05a96109923e76b901f786a5eae36e99c01237bd"}, + {file = "httptools-0.6.4-cp39-cp39-win_amd64.whl", hash = "sha256:b799de31416ecc589ad79dd85a0b2657a8fe39327944998dea368c1d4c9e55e6"}, + {file = "httptools-0.6.4.tar.gz", hash = "sha256:4e93eee4add6493b59a5c514da98c939b244fce4a0d8879cd3f466562f4b7d5c"}, +] + +[package.extras] +test = ["Cython (>=0.29.24)"] + +[[package]] +name = "httpx" +version = "0.28.1" +description = "The next generation HTTP client." 
+optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad"}, + {file = "httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc"}, +] + +[package.dependencies] +anyio = "*" +certifi = "*" +httpcore = "==1.*" +idna = "*" + +[package.extras] +brotli = ["brotli ; platform_python_implementation == \"CPython\"", "brotlicffi ; platform_python_implementation != \"CPython\""] +cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] +zstd = ["zstandard (>=0.18.0)"] + +[[package]] +name = "huggingface-hub" +version = "0.33.1" +description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" +optional = false +python-versions = ">=3.8.0" +groups = ["main"] +files = [ + {file = "huggingface_hub-0.33.1-py3-none-any.whl", hash = "sha256:ec8d7444628210c0ba27e968e3c4c973032d44dcea59ca0d78ef3f612196f095"}, + {file = "huggingface_hub-0.33.1.tar.gz", hash = "sha256:589b634f979da3ea4b8bdb3d79f97f547840dc83715918daf0b64209c0844c7b"}, +] + +[package.dependencies] +filelock = "*" +fsspec = ">=2023.5.0" +hf-xet = {version = ">=1.1.2,<2.0.0", markers = "platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"arm64\" or platform_machine == \"aarch64\""} +packaging = ">=20.9" +pyyaml = ">=5.1" +requests = "*" +tqdm = ">=4.42.1" +typing-extensions = ">=3.7.4.3" + +[package.extras] +all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "authlib (>=1.3.2)", "fastapi", "gradio (>=4.0.0)", "httpx", "itsdangerous", "jedi", "libcst (==1.4.0)", "mypy (==1.15.0) ; python_version >= \"3.9\"", "mypy (>=1.14.1,<1.15.0) ; python_version == \"3.8\"", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.9.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] +cli = ["InquirerPy (==0.3.4)"] +dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "authlib (>=1.3.2)", "fastapi", "gradio (>=4.0.0)", "httpx", "itsdangerous", "jedi", "libcst (==1.4.0)", "mypy (==1.15.0) ; python_version >= \"3.9\"", "mypy (>=1.14.1,<1.15.0) ; python_version == \"3.8\"", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.9.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] +fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"] +hf-transfer = ["hf-transfer (>=0.1.4)"] +hf-xet = ["hf-xet (>=1.1.2,<2.0.0)"] +inference = ["aiohttp"] +mcp = ["aiohttp", "mcp (>=1.8.0)", "typer"] +oauth = ["authlib (>=1.3.2)", "fastapi", "httpx", "itsdangerous"] +quality = ["libcst (==1.4.0)", "mypy (==1.15.0) ; python_version >= \"3.9\"", "mypy (>=1.14.1,<1.15.0) ; python_version == \"3.8\"", "ruff (>=0.9.0)"] +tensorflow = ["graphviz", "pydot", "tensorflow"] +tensorflow-testing = ["keras (<3.0)", "tensorflow"] +testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "authlib (>=1.3.2)", "fastapi", "gradio (>=4.0.0)", "httpx", "itsdangerous", "jedi", "numpy", "pytest (>=8.1.1,<8.2.2)", 
"pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"] +torch = ["safetensors[torch]", "torch"] +typing = ["types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)"] + +[[package]] +name = "humanfriendly" +version = "10.0" +description = "Human friendly output for text interfaces using Python" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +groups = ["main"] +files = [ + {file = "humanfriendly-10.0-py2.py3-none-any.whl", hash = "sha256:1697e1a8a8f550fd43c2865cd84542fc175a61dcb779b6fee18cf6b6ccba1477"}, + {file = "humanfriendly-10.0.tar.gz", hash = "sha256:6b0b831ce8f15f7300721aa49829fc4e83921a9a301cc7f606be6686a2288ddc"}, +] + +[package.dependencies] +pyreadline3 = {version = "*", markers = "sys_platform == \"win32\" and python_version >= \"3.8\""} + +[[package]] +name = "idna" +version = "3.10" +description = "Internationalized Domain Names in Applications (IDNA)" +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, + {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, +] + +[package.extras] +all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"] + +[[package]] +name = "importlib-metadata" +version = "8.4.0" +description = "Read metadata from Python packages" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "importlib_metadata-8.4.0-py3-none-any.whl", hash = "sha256:66f342cc6ac9818fc6ff340576acd24d65ba0b3efabb2b4ac08b598965a4a2f1"}, + {file = "importlib_metadata-8.4.0.tar.gz", hash = "sha256:9a547d3bc3608b025f93d403fdd1aae741c24fbb8314df4b155675742ce303c5"}, +] + +[package.dependencies] +zipp = ">=0.5" + +[package.extras] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +perf = ["ipython"] +test = ["flufl.flake8", "importlib-resources (>=1.3) ; python_version < \"3.9\"", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-perf (>=0.9.2)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""] + +[[package]] +name = "importlib-resources" +version = "6.4.5" +description = "Read resources from Python packages" +optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "python_version < \"3.10\"" +files = [ + {file = "importlib_resources-6.4.5-py3-none-any.whl", hash = "sha256:ac29d5f956f01d5e4bb63102a5a19957f1b9175e45649977264a1416783bb717"}, + {file = "importlib_resources-6.4.5.tar.gz", hash = "sha256:980862a1d16c9e147a59603677fa2aa5fd82b87f223b6cb870695bcfce830065"}, +] + +[package.dependencies] +zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""} + +[package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""] +cover = ["pytest-cov"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +enabler = ["pytest-enabler (>=2.2)"] +test = ["jaraco.test (>=5.4)", "pytest (>=6,!=8.1.*)", "zipp (>=3.17)"] +type = ["pytest-mypy"] + +[[package]] +name = "importlib-resources" +version = "6.5.2" 
+description = "Read resources from Python packages" +optional = false +python-versions = ">=3.9" +groups = ["main"] +markers = "python_version >= \"3.10\"" +files = [ + {file = "importlib_resources-6.5.2-py3-none-any.whl", hash = "sha256:789cfdc3ed28c78b67a06acb8126751ced69a3d5f79c095a98298cd8a760ccec"}, + {file = "importlib_resources-6.5.2.tar.gz", hash = "sha256:185f87adef5bcc288449d98fb4fba07cea78bc036455dd44c5fc4a2fe78fed2c"}, +] + +[package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""] +cover = ["pytest-cov"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +enabler = ["pytest-enabler (>=2.2)"] +test = ["jaraco.test (>=5.4)", "pytest (>=6,!=8.1.*)", "zipp (>=3.17)"] +type = ["pytest-mypy"] + +[[package]] +name = "iniconfig" +version = "2.1.0" +description = "brain-dead simple config-ini parsing" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760"}, + {file = "iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7"}, +] + +[[package]] +name = "itsdangerous" +version = "2.2.0" +description = "Safely pass data to untrusted environments and back." +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "itsdangerous-2.2.0-py3-none-any.whl", hash = "sha256:c6242fc49e35958c8b15141343aa660db5fc54d4f13a1db01a3f5891b98700ef"}, + {file = "itsdangerous-2.2.0.tar.gz", hash = "sha256:e0050c0b7da1eea53ffaf149c0cfbb5c6e2e2b69c4bef22c81fa6eb73e5f6173"}, +] + +[[package]] +name = "jinja2" +version = "3.1.6" +description = "A very fast and expressive template engine." 
+optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67"}, + {file = "jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d"}, +] + +[package.dependencies] +MarkupSafe = ">=2.0" + +[package.extras] +i18n = ["Babel (>=2.7)"] + +[[package]] +name = "jmespath" +version = "0.10.0" +description = "JSON Matching Expressions" +optional = false +python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" +groups = ["main"] +files = [ + {file = "jmespath-0.10.0-py2.py3-none-any.whl", hash = "sha256:cdf6525904cc597730141d61b36f2e4b8ecc257c420fa2f4549bac2c2d0cb72f"}, + {file = "jmespath-0.10.0.tar.gz", hash = "sha256:b85d0567b8666149a93172712e68920734333c0ce7e89b78b3e987f71e5ed4f9"}, +] + +[[package]] +name = "kubernetes" +version = "33.1.0" +description = "Kubernetes python client" +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "kubernetes-33.1.0-py2.py3-none-any.whl", hash = "sha256:544de42b24b64287f7e0aa9513c93cb503f7f40eea39b20f66810011a86eabc5"}, + {file = "kubernetes-33.1.0.tar.gz", hash = "sha256:f64d829843a54c251061a8e7a14523b521f2dc5c896cf6d65ccf348648a88993"}, +] + +[package.dependencies] +certifi = ">=14.05.14" +durationpy = ">=0.7" +google-auth = ">=1.0.1" +oauthlib = ">=3.2.2" +python-dateutil = ">=2.5.3" +pyyaml = ">=5.4.1" +requests = "*" +requests-oauthlib = "*" +six = ">=1.9.0" +urllib3 = ">=1.24.2" +websocket-client = ">=0.32.0,<0.40.0 || >0.40.0,<0.41.dev0 || >=0.43.dev0" + +[package.extras] +adal = ["adal (>=1.0.2)"] + +[[package]] +name = "markdown-it-py" +version = "3.0.0" +description = "Python port of markdown-it. Markdown parsing, done right!" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"}, + {file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"}, +] + +[package.dependencies] +mdurl = ">=0.1,<1.0" + +[package.extras] +benchmarking = ["psutil", "pytest", "pytest-benchmark"] +code-style = ["pre-commit (>=3.0,<4.0)"] +compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"] +linkify = ["linkify-it-py (>=1,<3)"] +plugins = ["mdit-py-plugins"] +profiling = ["gprof2dot"] +rtd = ["jupyter_sphinx", "mdit-py-plugins", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"] +testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] + +[[package]] +name = "markupsafe" +version = "2.1.5" +description = "Safely add untrusted strings to HTML/XML markup." 
+optional = false +python-versions = ">=3.7" +groups = ["main"] +markers = "python_version < \"3.10\"" +files = [ + {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-win32.whl", hash = "sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-win_amd64.whl", hash = "sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-win32.whl", hash = "sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-win_amd64.whl", hash = "sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = 
"sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-win32.whl", hash = "sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-win_amd64.whl", hash = "sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c8b29db45f8fe46ad280a7294f5c3ec36dbac9491f2d1c17345be8e69cc5928f"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec6a563cff360b50eed26f13adc43e61bc0c04d94b8be985e6fb24b81f6dcfdf"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a549b9c31bec33820e885335b451286e2969a2d9e24879f83fe904a5ce59d70a"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4f11aa001c540f62c6166c7726f71f7573b52c68c31f014c25cc7901deea0b52"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7b2e5a267c855eea6b4283940daa6e88a285f5f2a67f2220203786dfa59b37e9"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:2d2d793e36e230fd32babe143b04cec8a8b3eb8a3122d2aceb4a371e6b09b8df"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ce409136744f6521e39fd8e2a24c53fa18ad67aa5bc7c2cf83645cce5b5c4e50"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-win32.whl", hash = "sha256:4096e9de5c6fdf43fb4f04c26fb114f61ef0bf2e5604b6ee3019d51b69e8c371"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-win_amd64.whl", hash = "sha256:4275d846e41ecefa46e2015117a9f491e57a71ddd59bbead77e904dc02b1bed2"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:656f7526c69fac7f600bd1f400991cc282b417d17539a1b228617081106feb4a"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:97cafb1f3cbcd3fd2b6fbfb99ae11cdb14deea0736fc2b0952ee177f2b813a46"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f3fbcb7ef1f16e48246f704ab79d79da8a46891e2da03f8783a5b6fa41a9532"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa9db3f79de01457b03d4f01b34cf91bc0048eb2c3846ff26f66687c2f6d16ab"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:ffee1f21e5ef0d712f9033568f8344d5da8cc2869dbd08d87c84656e6a2d2f68"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5dedb4db619ba5a2787a94d877bc8ffc0566f92a01c0ef214865e54ecc9ee5e0"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:30b600cf0a7ac9234b2638fbc0fb6158ba5bdcdf46aeb631ead21248b9affbc4"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8dd717634f5a044f860435c1d8c16a270ddf0ef8588d4887037c5028b859b0c3"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-win32.whl", hash = "sha256:daa4ee5a243f0f20d528d939d06670a298dd39b1ad5f8a72a4275124a7819eff"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-win_amd64.whl", hash = "sha256:619bc166c4f2de5caa5a633b8b7326fbe98e0ccbfacabd87268a2b15ff73a029"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7a68b554d356a91cce1236aa7682dc01df0edba8d043fd1ce607c49dd3c1edcf"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:db0b55e0f3cc0be60c1f19efdde9a637c32740486004f20d1cff53c3c0ece4d2"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e53af139f8579a6d5f7b76549125f0d94d7e630761a2111bc431fd820e163b8"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17b950fccb810b3293638215058e432159d2b71005c74371d784862b7e4683f3"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c31f53cdae6ecfa91a77820e8b151dba54ab528ba65dfd235c80b086d68a465"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bff1b4290a66b490a2f4719358c0cdcd9bafb6b8f061e45c7a2460866bf50c2e"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bc1667f8b83f48511b94671e0e441401371dfd0f0a795c7daa4a3cd1dde55bea"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5049256f536511ee3f7e1b3f87d1d1209d327e818e6ae1365e8653d7e3abb6a6"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-win32.whl", hash = "sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-win_amd64.whl", hash = "sha256:fa173ec60341d6bb97a89f5ea19c85c5643c1e7dedebc22f5181eb73573142c5"}, + {file = "MarkupSafe-2.1.5.tar.gz", hash = "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b"}, +] + +[[package]] +name = "markupsafe" +version = "3.0.2" +description = "Safely add untrusted strings to HTML/XML markup." 
+optional = false +python-versions = ">=3.9" +groups = ["main"] +markers = "python_version >= \"3.10\"" +files = [ + {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38a9ef736c01fccdd6600705b09dc574584b89bea478200c5fbf112a6b0d5579"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bbcb445fa71794da8f178f0f6d66789a28d7319071af7a496d4d507ed566270d"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57cb5a3cf367aeb1d316576250f65edec5bb3be939e9247ae594b4bcbc317dfb"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:3809ede931876f5b2ec92eef964286840ed3540dadf803dd570c3b7e13141a3b"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e07c3764494e3776c602c1e78e298937c3315ccc9043ead7e685b7f2b8d47b3c"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b424c77b206d63d500bcb69fa55ed8d0e6a3774056bdc4839fc9298a7edca171"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-win32.whl", hash = "sha256:fcabf5ff6eea076f859677f5f0b6b5c1a51e70a376b0579e0eadef8db48c6b50"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:6af100e168aa82a50e186c82875a5893c5597a0c1ccdb0d8b40240b1f28b969a"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-win32.whl", hash = "sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-win32.whl", hash = "sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-win32.whl", hash = "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-win32.whl", hash = "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:eaa0a10b7f72326f1372a713e73c3f739b524b3af41feb43e4921cb529f5929a"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:48032821bbdf20f5799ff537c7ac3d1fba0ba032cfc06194faffa8cda8b560ff"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a9d3f5f0901fdec14d8d2f66ef7d035f2157240a433441719ac9a3fba440b13"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88b49a3b9ff31e19998750c38e030fc7bb937398b1f78cfa599aaef92d693144"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cfad01eed2c2e0c01fd0ecd2ef42c492f7f93902e39a42fc9ee1692961443a29"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:1225beacc926f536dc82e45f8a4d68502949dc67eea90eab715dea3a21c1b5f0"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3169b1eefae027567d1ce6ee7cae382c57fe26e82775f460f0b2778beaad66c0"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:eb7972a85c54febfb25b5c4b4f3af4dcc731994c7da0d8a0b4a6eb0640e1d178"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-win32.whl", hash = "sha256:8c4e8c3ce11e1f92f6536ff07154f9d49677ebaaafc32db9db4620bc11ed480f"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:6e296a513ca3d94054c2c881cc913116e90fd030ad1c656b3869762b754f5f8a"}, + {file = "markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0"}, +] + +[[package]] +name = "marshmallow" +version = "3.22.0" +description = "A lightweight library for converting complex datatypes to and from native Python datatypes." 
+optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "python_version < \"3.10\"" +files = [ + {file = "marshmallow-3.22.0-py3-none-any.whl", hash = "sha256:71a2dce49ef901c3f97ed296ae5051135fd3febd2bf43afe0ae9a82143a494d9"}, + {file = "marshmallow-3.22.0.tar.gz", hash = "sha256:4972f529104a220bb8637d595aa4c9762afbe7f7a77d82dc58c1615d70c5823e"}, +] + +[package.dependencies] +packaging = ">=17.0" + +[package.extras] +dev = ["marshmallow[tests]", "pre-commit (>=3.5,<4.0)", "tox"] +docs = ["alabaster (==1.0.0)", "autodocsumm (==0.2.13)", "sphinx (==8.0.2)", "sphinx-issues (==4.1.0)", "sphinx-version-warning (==1.1.2)"] +tests = ["pytest", "pytz", "simplejson"] + +[[package]] +name = "marshmallow" +version = "4.0.0" +description = "A lightweight library for converting complex datatypes to and from native Python datatypes." +optional = false +python-versions = ">=3.9" +groups = ["main"] +markers = "python_version >= \"3.10\"" +files = [ + {file = "marshmallow-4.0.0-py3-none-any.whl", hash = "sha256:e7b0528337e9990fd64950f8a6b3a1baabed09ad17a0dfb844d701151f92d203"}, + {file = "marshmallow-4.0.0.tar.gz", hash = "sha256:3b6e80aac299a7935cfb97ed01d1854fb90b5079430969af92118ea1b12a8d55"}, +] + +[package.dependencies] +backports-datetime-fromisoformat = {version = "*", markers = "python_version < \"3.11\""} +typing-extensions = {version = "*", markers = "python_version < \"3.11\""} + +[package.extras] +dev = ["marshmallow[tests]", "pre-commit (>=3.5,<5.0)", "tox"] +docs = ["autodocsumm (==0.2.14)", "furo (==2024.8.6)", "sphinx (==8.2.3)", "sphinx-copybutton (==0.5.2)", "sphinx-issues (==5.0.1)", "sphinxext-opengraph (==0.10.0)"] +tests = ["pytest", "simplejson"] + +[[package]] +name = "mdurl" +version = "0.1.2" +description = "Markdown URL utilities" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, + {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, +] + +[[package]] +name = "minio" +version = "7.2.7" +description = "MinIO Python SDK for Amazon S3 Compatible Cloud Storage" +optional = false +python-versions = "*" +groups = ["main"] +markers = "python_version < \"3.10\"" +files = [ + {file = "minio-7.2.7-py3-none-any.whl", hash = "sha256:59d1f255d852fe7104018db75b3bebbd987e538690e680f7c5de835e422de837"}, + {file = "minio-7.2.7.tar.gz", hash = "sha256:473d5d53d79f340f3cd632054d0c82d2f93177ce1af2eac34a235bea55708d98"}, +] + +[package.dependencies] +argon2-cffi = "*" +certifi = "*" +pycryptodome = "*" +typing-extensions = "*" +urllib3 = "*" + +[[package]] +name = "minio" +version = "7.2.15" +description = "MinIO Python SDK for Amazon S3 Compatible Cloud Storage" +optional = false +python-versions = ">=3.9" +groups = ["main"] +markers = "python_version >= \"3.10\"" +files = [ + {file = "minio-7.2.15-py3-none-any.whl", hash = "sha256:c06ef7a43e5d67107067f77b6c07ebdd68733e5aa7eed03076472410ca19d876"}, + {file = "minio-7.2.15.tar.gz", hash = "sha256:5247df5d4dca7bfa4c9b20093acd5ad43e82d8710ceb059d79c6eea970f49f79"}, +] + +[package.dependencies] +argon2-cffi = "*" +certifi = "*" +pycryptodome = "*" +typing-extensions = "*" +urllib3 = "*" + +[[package]] +name = "mmh3" +version = "5.0.1" +description = "Python extension for MurmurHash (MurmurHash3), a set of fast and robust hash functions." 
+optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "python_version < \"3.10\"" +files = [ + {file = "mmh3-5.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f0a4b4bf05778ed77d820d6e7d0e9bd6beb0c01af10e1ce9233f5d2f814fcafa"}, + {file = "mmh3-5.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ac7a391039aeab95810c2d020b69a94eb6b4b37d4e2374831e92db3a0cdf71c6"}, + {file = "mmh3-5.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3a2583b5521ca49756d8d8bceba80627a9cc295f255dcab4e3df7ccc2f09679a"}, + {file = "mmh3-5.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:081a8423fe53c1ac94f87165f3e4c500125d343410c1a0c5f1703e898a3ef038"}, + {file = "mmh3-5.0.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b8b4d72713799755dc8954a7d36d5c20a6c8de7b233c82404d122c7c7c1707cc"}, + {file = "mmh3-5.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:389a6fd51efc76d3182d36ec306448559c1244f11227d2bb771bdd0e6cc91321"}, + {file = "mmh3-5.0.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:39f4128edaa074bff721b1d31a72508cba4d2887ee7867f22082e1fe9d4edea0"}, + {file = "mmh3-5.0.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d5d23a94d91aabba3386b3769048d5f4210fdfef80393fece2f34ba5a7b466c"}, + {file = "mmh3-5.0.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:16347d038361f8b8f24fd2b7ef378c9b68ddee9f7706e46269b6e0d322814713"}, + {file = "mmh3-5.0.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:6e299408565af7d61f2d20a5ffdd77cf2ed902460fe4e6726839d59ba4b72316"}, + {file = "mmh3-5.0.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:42050af21ddfc5445ee5a66e73a8fc758c71790305e3ee9e4a85a8e69e810f94"}, + {file = "mmh3-5.0.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:2ae9b1f5ef27ec54659920f0404b7ceb39966e28867c461bfe83a05e8d18ddb0"}, + {file = "mmh3-5.0.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:50c2495a02045f3047d71d4ae9cdd7a15efc0bcbb7ff17a18346834a8e2d1d19"}, + {file = "mmh3-5.0.1-cp310-cp310-win32.whl", hash = "sha256:c028fa77cddf351ca13b4a56d43c1775652cde0764cadb39120b68f02a23ecf6"}, + {file = "mmh3-5.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:c5e741e421ec14400c4aae30890515c201f518403bdef29ae1e00d375bb4bbb5"}, + {file = "mmh3-5.0.1-cp310-cp310-win_arm64.whl", hash = "sha256:b17156d56fabc73dbf41bca677ceb6faed435cc8544f6566d72ea77d8a17e9d0"}, + {file = "mmh3-5.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9a6d5a9b1b923f1643559ba1fc0bf7a5076c90cbb558878d3bf3641ce458f25d"}, + {file = "mmh3-5.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3349b968be555f7334bbcce839da98f50e1e80b1c615d8e2aa847ea4a964a012"}, + {file = "mmh3-5.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1bd3c94b110e55db02ab9b605029f48a2f7f677c6e58c09d44e42402d438b7e1"}, + {file = "mmh3-5.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d47ba84d48608f79adbb10bb09986b6dc33eeda5c2d1bd75d00820081b73bde9"}, + {file = "mmh3-5.0.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c0217987a8b8525c8d9170f66d036dec4ab45cfbd53d47e8d76125791ceb155e"}, + {file = "mmh3-5.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2797063a34e78d1b61639a98b0edec1c856fa86ab80c7ec859f1796d10ba429"}, + {file = 
"mmh3-5.0.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8bba16340adcbd47853a2fbe5afdb397549e8f2e79324ff1dced69a3f8afe7c3"}, + {file = "mmh3-5.0.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:282797957c9f60b51b9d768a602c25f579420cc9af46feb77d457a27823d270a"}, + {file = "mmh3-5.0.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:e4fb670c29e63f954f9e7a2cdcd57b36a854c2538f579ef62681ccbaa1de2b69"}, + {file = "mmh3-5.0.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8ee7d85438dc6aff328e19ab052086a3c29e8a9b632998a49e5c4b0034e9e8d6"}, + {file = "mmh3-5.0.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:b7fb5db231f3092444bc13901e6a8d299667126b00636ffbad4a7b45e1051e2f"}, + {file = "mmh3-5.0.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:c100dd441703da5ec136b1d9003ed4a041d8a1136234c9acd887499796df6ad8"}, + {file = "mmh3-5.0.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:71f3b765138260fd7a7a2dba0ea5727dabcd18c1f80323c9cfef97a7e86e01d0"}, + {file = "mmh3-5.0.1-cp311-cp311-win32.whl", hash = "sha256:9a76518336247fd17689ce3ae5b16883fd86a490947d46a0193d47fb913e26e3"}, + {file = "mmh3-5.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:336bc4df2e44271f1c302d289cc3d78bd52d3eed8d306c7e4bff8361a12bf148"}, + {file = "mmh3-5.0.1-cp311-cp311-win_arm64.whl", hash = "sha256:af6522722fbbc5999aa66f7244d0986767a46f1fb05accc5200f75b72428a508"}, + {file = "mmh3-5.0.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:f2730bb263ed9c388e8860438b057a53e3cc701134a6ea140f90443c4c11aa40"}, + {file = "mmh3-5.0.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:6246927bc293f6d56724536400b85fb85f5be26101fa77d5f97dd5e2a4c69bf2"}, + {file = "mmh3-5.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:fbca322519a6e6e25b6abf43e940e1667cf8ea12510e07fb4919b48a0cd1c411"}, + {file = "mmh3-5.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eae8c19903ed8a1724ad9e67e86f15d198a7a1271a4f9be83d47e38f312ed672"}, + {file = "mmh3-5.0.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a09fd6cc72c07c0c07c3357714234b646d78052487c4a3bd5f7f6e08408cff60"}, + {file = "mmh3-5.0.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2ff8551fee7ae3b11c5d986b6347ade0dccaadd4670ffdb2b944dee120ffcc84"}, + {file = "mmh3-5.0.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e39694c73a5a20c8bf36dfd8676ed351e5234d55751ba4f7562d85449b21ef3f"}, + {file = "mmh3-5.0.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eba6001989a92f72a89c7cf382fda831678bd780707a66b4f8ca90239fdf2123"}, + {file = "mmh3-5.0.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:0771f90c9911811cc606a5c7b7b58f33501c9ee896ed68a6ac22c7d55878ecc0"}, + {file = "mmh3-5.0.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:09b31ed0c0c0920363e96641fac4efde65b1ab62b8df86293142f35a254e72b4"}, + {file = "mmh3-5.0.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:5cf4a8deda0235312db12075331cb417c4ba163770edfe789bde71d08a24b692"}, + {file = "mmh3-5.0.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:41f7090a95185ef20ac018581a99337f0cbc84a2135171ee3290a9c0d9519585"}, + {file = "mmh3-5.0.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:b97b5b368fb7ff22194ec5854f5b12d8de9ab67a0f304728c7f16e5d12135b76"}, + {file = "mmh3-5.0.1-cp312-cp312-win32.whl", hash = "sha256:842516acf04da546f94fad52db125ee619ccbdcada179da51c326a22c4578cb9"}, + {file = "mmh3-5.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:d963be0dbfd9fca209c17172f6110787ebf78934af25e3694fe2ba40e55c1e2b"}, + {file = "mmh3-5.0.1-cp312-cp312-win_arm64.whl", hash = "sha256:a5da292ceeed8ce8e32b68847261a462d30fd7b478c3f55daae841404f433c15"}, + {file = "mmh3-5.0.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:673e3f1c8d4231d6fb0271484ee34cb7146a6499fc0df80788adb56fd76842da"}, + {file = "mmh3-5.0.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f795a306bd16a52ad578b663462cc8e95500b3925d64118ae63453485d67282b"}, + {file = "mmh3-5.0.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5ed57a5e28e502a1d60436cc25c76c3a5ba57545f250f2969af231dc1221e0a5"}, + {file = "mmh3-5.0.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:632c28e7612e909dbb6cbe2fe496201ada4695b7715584005689c5dc038e59ad"}, + {file = "mmh3-5.0.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:53fd6bd525a5985e391c43384672d9d6b317fcb36726447347c7fc75bfed34ec"}, + {file = "mmh3-5.0.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dceacf6b0b961a0e499836af3aa62d60633265607aef551b2a3e3c48cdaa5edd"}, + {file = "mmh3-5.0.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8f0738d478fdfb5d920f6aff5452c78f2c35b0eff72caa2a97dfe38e82f93da2"}, + {file = "mmh3-5.0.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e70285e7391ab88b872e5bef632bad16b9d99a6d3ca0590656a4753d55988af"}, + {file = "mmh3-5.0.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:27e5fc6360aa6b828546a4318da1a7da6bf6e5474ccb053c3a6aa8ef19ff97bd"}, + {file = "mmh3-5.0.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:7989530c3c1e2c17bf5a0ec2bba09fd19819078ba90beedabb1c3885f5040b0d"}, + {file = "mmh3-5.0.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:cdad7bee649950da7ecd3cbbbd12fb81f1161072ecbdb5acfa0018338c5cb9cf"}, + {file = "mmh3-5.0.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:e143b8f184c1bb58cecd85ab4a4fd6dc65a2d71aee74157392c3fddac2a4a331"}, + {file = "mmh3-5.0.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e5eb12e886f3646dd636f16b76eb23fc0c27e8ff3c1ae73d4391e50ef60b40f6"}, + {file = "mmh3-5.0.1-cp313-cp313-win32.whl", hash = "sha256:16e6dddfa98e1c2d021268e72c78951234186deb4df6630e984ac82df63d0a5d"}, + {file = "mmh3-5.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:d3ffb792d70b8c4a2382af3598dad6ae0c5bd9cee5b7ffcc99aa2f5fd2c1bf70"}, + {file = "mmh3-5.0.1-cp313-cp313-win_arm64.whl", hash = "sha256:122fa9ec148383f9124292962bda745f192b47bfd470b2af5fe7bb3982b17896"}, + {file = "mmh3-5.0.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:b12bad8c75e6ff5d67319794fb6a5e8c713826c818d47f850ad08b4aa06960c6"}, + {file = "mmh3-5.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e5bbb066538c1048d542246fc347bb7994bdda29a3aea61c22f9f8b57111ce69"}, + {file = "mmh3-5.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:eee6134273f64e2a106827cc8fd77e70cc7239a285006fc6ab4977d59b015af2"}, + {file = "mmh3-5.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d04d9aa19d48e4c7bbec9cabc2c4dccc6ff3b2402f856d5bf0de03e10f167b5b"}, + {file = 
"mmh3-5.0.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:79f37da1eed034d06567a69a7988456345c7f29e49192831c3975b464493b16e"}, + {file = "mmh3-5.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:242f77666743337aa828a2bf2da71b6ba79623ee7f93edb11e009f69237c8561"}, + {file = "mmh3-5.0.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffd943fff690463945f6441a2465555b3146deaadf6a5e88f2590d14c655d71b"}, + {file = "mmh3-5.0.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:565b15f8d7df43acb791ff5a360795c20bfa68bca8b352509e0fbabd06cc48cd"}, + {file = "mmh3-5.0.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:fc6aafb867c2030df98ac7760ff76b500359252867985f357bd387739f3d5287"}, + {file = "mmh3-5.0.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:32898170644d45aa27c974ab0d067809c066205110f5c6d09f47d9ece6978bfe"}, + {file = "mmh3-5.0.1-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:42865567838d2193eb64e0ef571f678bf361a254fcdef0c5c8e73243217829bd"}, + {file = "mmh3-5.0.1-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:5ff5c1f301c4a8b6916498969c0fcc7e3dbc56b4bfce5cfe3fe31f3f4609e5ae"}, + {file = "mmh3-5.0.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:be74c2dda8a6f44a504450aa2c3507f8067a159201586fc01dd41ab80efc350f"}, + {file = "mmh3-5.0.1-cp38-cp38-win32.whl", hash = "sha256:5610a842621ff76c04b20b29cf5f809b131f241a19d4937971ba77dc99a7f330"}, + {file = "mmh3-5.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:de15739ac50776fe8aa1ef13f1be46a6ee1fbd45f6d0651084097eb2be0a5aa4"}, + {file = "mmh3-5.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:48e84cf3cc7e8c41bc07de72299a73b92d9e3cde51d97851420055b1484995f7"}, + {file = "mmh3-5.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6dd9dc28c2d168c49928195c2e29b96f9582a5d07bd690a28aede4cc07b0e696"}, + {file = "mmh3-5.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2771a1c56a3d4bdad990309cff5d0a8051f29c8ec752d001f97d6392194ae880"}, + {file = "mmh3-5.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5ff2a8322ba40951a84411550352fba1073ce1c1d1213bb7530f09aed7f8caf"}, + {file = "mmh3-5.0.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a16bd3ec90682c9e0a343e6bd4c778c09947c8c5395cdb9e5d9b82b2559efbca"}, + {file = "mmh3-5.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d45733a78d68b5b05ff4a823aea51fa664df1d3bf4929b152ff4fd6dea2dd69b"}, + {file = "mmh3-5.0.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:904285e83cedebc8873b0838ed54c20f7344120be26e2ca5a907ab007a18a7a0"}, + {file = "mmh3-5.0.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac4aeb1784e43df728034d0ed72e4b2648db1a69fef48fa58e810e13230ae5ff"}, + {file = "mmh3-5.0.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:cb3d4f751a0b8b4c8d06ef1c085216c8fddcc8b8c8d72445976b5167a40c6d1e"}, + {file = "mmh3-5.0.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:8021851935600e60c42122ed1176399d7692df338d606195cd599d228a04c1c6"}, + {file = "mmh3-5.0.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:6182d5924a5efc451900f864cbb021d7e8ad5d524816ca17304a0f663bc09bb5"}, + {file = "mmh3-5.0.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = 
"sha256:5f30b834552a4f79c92e3d266336fb87fd92ce1d36dc6813d3e151035890abbd"}, + {file = "mmh3-5.0.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:cd4383f35e915e06d077df27e04ffd3be7513ec6a9de2d31f430393f67e192a7"}, + {file = "mmh3-5.0.1-cp39-cp39-win32.whl", hash = "sha256:1455fb6b42665a97db8fc66e89a861e52b567bce27ed054c47877183f86ea6e3"}, + {file = "mmh3-5.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:9e26a0f4eb9855a143f5938a53592fa14c2d3b25801c2106886ab6c173982780"}, + {file = "mmh3-5.0.1-cp39-cp39-win_arm64.whl", hash = "sha256:0d0a35a69abdad7549c4030a714bb4ad07902edb3bbe61e1bbc403ded5d678be"}, + {file = "mmh3-5.0.1.tar.gz", hash = "sha256:7dab080061aeb31a6069a181f27c473a1f67933854e36a3464931f2716508896"}, +] + +[package.extras] +benchmark = ["pymmh3 (==0.0.5)", "pyperf (==2.7.0)", "xxhash (==3.5.0)"] +docs = ["myst-parser (==4.0.0)", "shibuya (==2024.8.30)", "sphinx (==8.0.2)", "sphinx-copybutton (==0.5.2)"] +lint = ["black (==24.8.0)", "clang-format (==18.1.8)", "isort (==5.13.2)", "pylint (==3.2.7)"] +plot = ["matplotlib (==3.9.2)", "pandas (==2.2.2)"] +test = ["pytest (==8.3.3)", "pytest-sugar (==1.0.0)"] +type = ["mypy (==1.11.2)"] + +[[package]] +name = "mmh3" +version = "5.1.0" +description = "Python extension for MurmurHash (MurmurHash3), a set of fast and robust hash functions." +optional = false +python-versions = ">=3.9" +groups = ["main"] +markers = "python_version >= \"3.10\"" +files = [ + {file = "mmh3-5.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:eaf4ac5c6ee18ca9232238364d7f2a213278ae5ca97897cafaa123fcc7bb8bec"}, + {file = "mmh3-5.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:48f9aa8ccb9ad1d577a16104834ac44ff640d8de8c0caed09a2300df7ce8460a"}, + {file = "mmh3-5.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d4ba8cac21e1f2d4e436ce03a82a7f87cda80378691f760e9ea55045ec480a3d"}, + {file = "mmh3-5.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d69281c281cb01994f054d862a6bb02a2e7acfe64917795c58934b0872b9ece4"}, + {file = "mmh3-5.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4d05ed3962312fbda2a1589b97359d2467f677166952f6bd410d8c916a55febf"}, + {file = "mmh3-5.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:78ae6a03f4cff4aa92ddd690611168856f8c33a141bd3e5a1e0a85521dc21ea0"}, + {file = "mmh3-5.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:95f983535b39795d9fb7336438faae117424c6798f763d67c6624f6caf2c4c01"}, + {file = "mmh3-5.1.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d46fdd80d4c7ecadd9faa6181e92ccc6fe91c50991c9af0e371fdf8b8a7a6150"}, + {file = "mmh3-5.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:0f16e976af7365ea3b5c425124b2a7f0147eed97fdbb36d99857f173c8d8e096"}, + {file = "mmh3-5.1.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:6fa97f7d1e1f74ad1565127229d510f3fd65d931fdedd707c1e15100bc9e5ebb"}, + {file = "mmh3-5.1.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:4052fa4a8561bd62648e9eb993c8f3af3bdedadf3d9687aa4770d10e3709a80c"}, + {file = "mmh3-5.1.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:3f0e8ae9f961037f812afe3cce7da57abf734285961fffbeff9a4c011b737732"}, + {file = "mmh3-5.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:99297f207db967814f1f02135bb7fe7628b9eacb046134a34e1015b26b06edce"}, + {file = "mmh3-5.1.0-cp310-cp310-win32.whl", hash = 
"sha256:2e6c8dc3631a5e22007fbdb55e993b2dbce7985c14b25b572dd78403c2e79182"}, + {file = "mmh3-5.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:e4e8c7ad5a4dddcfde35fd28ef96744c1ee0f9d9570108aa5f7e77cf9cfdf0bf"}, + {file = "mmh3-5.1.0-cp310-cp310-win_arm64.whl", hash = "sha256:45da549269883208912868a07d0364e1418d8292c4259ca11699ba1b2475bd26"}, + {file = "mmh3-5.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0b529dcda3f951ff363a51d5866bc6d63cf57f1e73e8961f864ae5010647079d"}, + {file = "mmh3-5.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4db1079b3ace965e562cdfc95847312f9273eb2ad3ebea983435c8423e06acd7"}, + {file = "mmh3-5.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:22d31e3a0ff89b8eb3b826d6fc8e19532998b2aa6b9143698043a1268da413e1"}, + {file = "mmh3-5.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2139bfbd354cd6cb0afed51c4b504f29bcd687a3b1460b7e89498329cc28a894"}, + {file = "mmh3-5.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8c8105c6a435bc2cd6ea2ef59558ab1a2976fd4a4437026f562856d08996673a"}, + {file = "mmh3-5.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57730067174a7f36fcd6ce012fe359bd5510fdaa5fe067bc94ed03e65dafb769"}, + {file = "mmh3-5.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bde80eb196d7fdc765a318604ded74a4378f02c5b46c17aa48a27d742edaded2"}, + {file = "mmh3-5.1.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9c8eddcb441abddeb419c16c56fd74b3e2df9e57f7aa2903221996718435c7a"}, + {file = "mmh3-5.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:99e07e4acafbccc7a28c076a847fb060ffc1406036bc2005acb1b2af620e53c3"}, + {file = "mmh3-5.1.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:9e25ba5b530e9a7d65f41a08d48f4b3fedc1e89c26486361166a5544aa4cad33"}, + {file = "mmh3-5.1.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:bb9bf7475b4d99156ce2f0cf277c061a17560c8c10199c910a680869a278ddc7"}, + {file = "mmh3-5.1.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:2a1b0878dd281ea3003368ab53ff6f568e175f1b39f281df1da319e58a19c23a"}, + {file = "mmh3-5.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:25f565093ac8b8aefe0f61f8f95c9a9d11dd69e6a9e9832ff0d293511bc36258"}, + {file = "mmh3-5.1.0-cp311-cp311-win32.whl", hash = "sha256:1e3554d8792387eac73c99c6eaea0b3f884e7130eb67986e11c403e4f9b6d372"}, + {file = "mmh3-5.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:8ad777a48197882492af50bf3098085424993ce850bdda406a358b6ab74be759"}, + {file = "mmh3-5.1.0-cp311-cp311-win_arm64.whl", hash = "sha256:f29dc4efd99bdd29fe85ed6c81915b17b2ef2cf853abf7213a48ac6fb3eaabe1"}, + {file = "mmh3-5.1.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:45712987367cb9235026e3cbf4334670522a97751abfd00b5bc8bfa022c3311d"}, + {file = "mmh3-5.1.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b1020735eb35086ab24affbea59bb9082f7f6a0ad517cb89f0fc14f16cea4dae"}, + {file = "mmh3-5.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:babf2a78ce5513d120c358722a2e3aa7762d6071cd10cede026f8b32452be322"}, + {file = "mmh3-5.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4f47f58cd5cbef968c84a7c1ddc192fef0a36b48b0b8a3cb67354531aa33b00"}, + {file = "mmh3-5.1.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2044a601c113c981f2c1e14fa33adc9b826c9017034fe193e9eb49a6882dbb06"}, 
+ {file = "mmh3-5.1.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c94d999c9f2eb2da44d7c2826d3fbffdbbbbcde8488d353fee7c848ecc42b968"}, + {file = "mmh3-5.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a015dcb24fa0c7a78f88e9419ac74f5001c1ed6a92e70fd1803f74afb26a4c83"}, + {file = "mmh3-5.1.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:457da019c491a2d20e2022c7d4ce723675e4c081d9efc3b4d8b9f28a5ea789bd"}, + {file = "mmh3-5.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:71408579a570193a4ac9c77344d68ddefa440b00468a0b566dcc2ba282a9c559"}, + {file = "mmh3-5.1.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:8b3a04bc214a6e16c81f02f855e285c6df274a2084787eeafaa45f2fbdef1b63"}, + {file = "mmh3-5.1.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:832dae26a35514f6d3c1e267fa48e8de3c7b978afdafa0529c808ad72e13ada3"}, + {file = "mmh3-5.1.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:bf658a61fc92ef8a48945ebb1076ef4ad74269e353fffcb642dfa0890b13673b"}, + {file = "mmh3-5.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3313577453582b03383731b66447cdcdd28a68f78df28f10d275d7d19010c1df"}, + {file = "mmh3-5.1.0-cp312-cp312-win32.whl", hash = "sha256:1d6508504c531ab86c4424b5a5ff07c1132d063863339cf92f6657ff7a580f76"}, + {file = "mmh3-5.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:aa75981fcdf3f21759d94f2c81b6a6e04a49dfbcdad88b152ba49b8e20544776"}, + {file = "mmh3-5.1.0-cp312-cp312-win_arm64.whl", hash = "sha256:a4c1a76808dfea47f7407a0b07aaff9087447ef6280716fd0783409b3088bb3c"}, + {file = "mmh3-5.1.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:7a523899ca29cfb8a5239618474a435f3d892b22004b91779fcb83504c0d5b8c"}, + {file = "mmh3-5.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:17cef2c3a6ca2391ca7171a35ed574b5dab8398163129a3e3a4c05ab85a4ff40"}, + {file = "mmh3-5.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:52e12895b30110f3d89dae59a888683cc886ed0472dd2eca77497edef6161997"}, + {file = "mmh3-5.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e0d6719045cda75c3f40397fc24ab67b18e0cb8f69d3429ab4c39763c4c608dd"}, + {file = "mmh3-5.1.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d19fa07d303a91f8858982c37e6939834cb11893cb3ff20e6ee6fa2a7563826a"}, + {file = "mmh3-5.1.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:31b47a620d622fbde8ca1ca0435c5d25de0ac57ab507209245e918128e38e676"}, + {file = "mmh3-5.1.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:00f810647c22c179b6821079f7aa306d51953ac893587ee09cf1afb35adf87cb"}, + {file = "mmh3-5.1.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6128b610b577eed1e89ac7177ab0c33d06ade2aba93f5c89306032306b5f1c6"}, + {file = "mmh3-5.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:1e550a45d2ff87a1c11b42015107f1778c93f4c6f8e731bf1b8fa770321b8cc4"}, + {file = "mmh3-5.1.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:785ae09276342f79fd8092633e2d52c0f7c44d56e8cfda8274ccc9b76612dba2"}, + {file = "mmh3-5.1.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:0f4be3703a867ef976434afd3661a33884abe73ceb4ee436cac49d3b4c2aaa7b"}, + {file = "mmh3-5.1.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = 
"sha256:e513983830c4ff1f205ab97152a0050cf7164f1b4783d702256d39c637b9d107"}, + {file = "mmh3-5.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b9135c300535c828c0bae311b659f33a31c941572eae278568d1a953c4a57b59"}, + {file = "mmh3-5.1.0-cp313-cp313-win32.whl", hash = "sha256:c65dbd12885a5598b70140d24de5839551af5a99b29f9804bb2484b29ef07692"}, + {file = "mmh3-5.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:10db7765201fc65003fa998faa067417ef6283eb5f9bba8f323c48fd9c33e91f"}, + {file = "mmh3-5.1.0-cp313-cp313-win_arm64.whl", hash = "sha256:b22fe2e54be81f6c07dcb36b96fa250fb72effe08aa52fbb83eade6e1e2d5fd7"}, + {file = "mmh3-5.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:166b67749a1d8c93b06f5e90576f1ba838a65c8e79f28ffd9dfafba7c7d0a084"}, + {file = "mmh3-5.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:adba83c7ba5cc8ea201ee1e235f8413a68e7f7b8a657d582cc6c6c9d73f2830e"}, + {file = "mmh3-5.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a61f434736106804eb0b1612d503c4e6eb22ba31b16e6a2f987473de4226fa55"}, + {file = "mmh3-5.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ba9ce59816b30866093f048b3312c2204ff59806d3a02adee71ff7bd22b87554"}, + {file = "mmh3-5.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cd51597bef1e503363b05cb579db09269e6e6c39d419486626b255048daf545b"}, + {file = "mmh3-5.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d51a1ed642d3fb37b8f4cab966811c52eb246c3e1740985f701ef5ad4cdd2145"}, + {file = "mmh3-5.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:709bfe81c53bf8a3609efcbd65c72305ade60944f66138f697eefc1a86b6e356"}, + {file = "mmh3-5.1.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e01a9b0092b6f82e861137c8e9bb9899375125b24012eb5219e61708be320032"}, + {file = "mmh3-5.1.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:27e46a2c13c9a805e03c9ec7de0ca8e096794688ab2125bdce4229daf60c4a56"}, + {file = "mmh3-5.1.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:5766299c1d26f6bfd0a638e070bd17dbd98d4ccb067d64db3745bf178e700ef0"}, + {file = "mmh3-5.1.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:7785205e3e4443fdcbb73766798c7647f94c2f538b90f666688f3e757546069e"}, + {file = "mmh3-5.1.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:8e574fbd39afb433b3ab95683b1b4bf18313dc46456fc9daaddc2693c19ca565"}, + {file = "mmh3-5.1.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1b6727a5a20e32cbf605743749f3862abe5f5e097cbf2afc7be5aafd32a549ae"}, + {file = "mmh3-5.1.0-cp39-cp39-win32.whl", hash = "sha256:d6eaa711d4b9220fe5252032a44bf68e5dcfb7b21745a96efc9e769b0dd57ec2"}, + {file = "mmh3-5.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:49d444913f6c02980e5241a53fe9af2338f2043d6ce5b6f5ea7d302c52c604ac"}, + {file = "mmh3-5.1.0-cp39-cp39-win_arm64.whl", hash = "sha256:0daaeaedd78773b70378f2413c7d6b10239a75d955d30d54f460fb25d599942d"}, + {file = "mmh3-5.1.0.tar.gz", hash = "sha256:136e1e670500f177f49ec106a4ebf0adf20d18d96990cc36ea492c651d2b406c"}, +] + +[package.extras] +benchmark = ["pymmh3 (==0.0.5)", "pyperf (==2.8.1)", "xxhash (==3.5.0)"] +docs = ["myst-parser (==4.0.0)", "shibuya (==2024.12.21)", "sphinx (==8.1.3)", "sphinx-copybutton (==0.5.2)"] +lint = ["black (==24.10.0)", "clang-format (==19.1.7)", "isort (==5.13.2)", "pylint (==3.3.3)"] +plot = ["matplotlib (==3.10.0)", "pandas (==2.2.3)"] +test = ["pytest (==8.3.4)", "pytest-sugar 
(==1.0.0)"] +type = ["mypy (==1.14.1)"] + +[[package]] +name = "modelscope" +version = "1.14.0" +description = "ModelScope: bring the notion of Model-as-a-Service to life." +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "modelscope-1.14.0-py3-none-any.whl", hash = "sha256:d3c222c8c029f7a32341d869e753d26a6c32d92db911d3f9bba11660faf2e371"}, + {file = "modelscope-1.14.0.tar.gz", hash = "sha256:d4f4030acd5198d9a18c7d3314f983f73348056dea5f98380f304588113f9d18"}, +] + +[package.dependencies] +addict = "*" +attrs = "*" +datasets = ">=2.16.0,<2.19.0" +einops = "*" +filelock = ">=3.3.0" +gast = ">=0.2.2" +huggingface-hub = "*" +numpy = "*" +oss2 = "*" +pandas = "*" +Pillow = ">=6.2.0" +pyarrow = ">=6.0.0,<9.0.0 || >9.0.0" +python-dateutil = ">=2.1" +pyyaml = "*" +requests = ">=2.25" +scipy = "*" +setuptools = "*" +simplejson = ">=3.3.0" +sortedcontainers = ">=1.5.9" +tqdm = ">=4.64.0" +urllib3 = ">=1.26" +yapf = "*" + +[package.extras] +all = ["PyMCubes", "accelerate", "albumentations (>=1.0.3)", "av (>=9.2.0)", "biopython", "bmt-clipit (>=1.0)", "boto3", "chumpy", "clip (>=1.0)", "cloudpickle", "control-ldm", "ddpm-guided-diffusion", "decord (>=0.6.0)", "diffusers", "diffusers (>=0.25.0)", "easydict", "easyrobust", "edit-distance", "embeddings", "face-alignment (>=1.3.5)", "fairscale (>=0.4.1)", "fairseq", "fastai (>=1.0.51)", "ffmpeg (>=1.4)", "ffmpeg-python (>=0.2.0)", "filelock", "ftfy", "ftfy (>=6.0.3)", "fvcore", "imageio (>=2.9.0)", "imageio-ffmpeg (>=0.4.2)", "imgaug (>=0.4.0)", "iopath", "ipdb", "jieba (>=0.42.1)", "kornia (>=0.5.0)", "lap", "librosa (==0.10.1)", "lmdb", "lpips", "matplotlib", "megatron-util", "ml-collections", "mmcls (>=0.21.0)", "mmdet (>=2.25.0,<=2.28.2)", "mmdet3d (==1.0.0a1)", "mmsegmentation (<=0.30.0)", "moviepy (>=1.0.3)", "nerfacc (==0.2.2)", "networkx", "nltk", "numba", "omegaconf", "onnx", "onnxruntime (>=1.10)", "onnxsim", "open-clip-torch (>=2.7.0)", "opencv-python", "paint-ldm", "pandas", "panopticapi", "plyfile (>=0.7.4)", "protobuf (>=3.19.0,<3.21.0)", "psutil", "pyclipper", "pycocoevalcap (>=1.2)", "pycocotools (>=2.0.4)", "pydot", "pythainlp", "pytorch-lightning", "pytorch-lightning (<=1.7.7)", "pyvi", "rapidfuzz", "regex", "rouge", "rouge-score (<=0.0.4)", "sacrebleu", "sacremoses (>=0.0.41)", "safetensors", "scikit-image (>=0.19.3,<0.20.0)", "scikit-learn", "scikit-learn (>=0.20.1)", "scipy", "sentencepiece", "seqeval", "shapely", "shotdetect-scenedetect-lgss (>=0.0.4)", "smplx", "soundfile", "spacy (>=2.3.5)", "stanza", "subword-nmt (>=0.3.8)", "taming-transformers-rom1504", "tensorboardX", "tensorflow-estimator (>=1.15.1)", "termcolor", "tf-slim", "thop", "timm", "timm (>=0.4.9)", "tokenizers", "torch-scatter", "torchmetrics (>=0.6.2)", "torchsummary (>=1.5.1)", "torchvision", "tqdm", "transformers (>=4.12.0)", "transformers (>=4.26.0)", "transformers (>=4.27.1)", "trimesh", "ujson", "unicodedata2", "utils", "videofeatures-clipit (>=1.0)", "yacs", "zhconv"] +audio = ["MinDAEC", "PyWavelets (>=1.0.0)", "SoundFile (>0.10)", "bitstring", "funasr (>=1.0.0)", "funcodec (>=0.2.0)", "greenlet (>=1.1.2)", "hdbscan", "hyperpyyaml", "inflect", "jedi (>=0.18.1)", "kaldiio", "kantts", "kwsbp (>=0.0.6)", "librosa (==0.10.1)", "lxml", "matplotlib", "mir-eval (>=0.7)", "msgpack (>=1.0.4)", "parso (>=0.8.3)", "pexpect (>=4.8.0)", "pickleshare (>=0.7.5)", "prompt-toolkit (>=3.0.30)", "protobuf", "ptflops", "ptyprocess (>=0.7.0)", "py-sound-connect (>=0.1)", "pygments (>=2.12.0)", "pysptk (>=0.1.15,<0.1.19)", "pytorch-wavelets", 
"rotary-embedding-torch (>=0.1.5)", "scikit-learn", "scipy", "sox", "speechbrain (>=0.5.12)", "tensorboardX", "torchaudio", "tqdm", "traitlets (>=5.3.0)", "ttsfrd (>=0.1.2)", "umap-learn", "unidecode", "wcwidth (>=0.2.5)"] +audio-asr = ["funasr (>=1.0.0)"] +audio-kws = ["SoundFile (>0.10)", "kaldiio", "kwsbp (>=0.0.6)", "matplotlib", "py-sound-connect (>=0.1)", "scipy", "tensorboardX"] +audio-signal = ["MinDAEC", "SoundFile (>0.10)", "hdbscan", "hyperpyyaml", "librosa (==0.10.1)", "mir-eval (>=0.7)", "rotary-embedding-torch (>=0.1.5)", "scipy", "speechbrain (>=0.5.12)", "torchaudio", "tqdm", "umap-learn"] +audio-tts = ["PyWavelets (>=1.0.0)", "bitstring", "greenlet (>=1.1.2)", "inflect", "jedi (>=0.18.1)", "kantts", "librosa (==0.10.1)", "lxml", "matplotlib", "msgpack (>=1.0.4)", "parso (>=0.8.3)", "pexpect (>=4.8.0)", "pickleshare (>=0.7.5)", "prompt-toolkit (>=3.0.30)", "protobuf", "ptflops", "ptyprocess (>=0.7.0)", "pygments (>=2.12.0)", "pysptk (>=0.1.15,<0.1.19)", "pytorch-wavelets", "scikit-learn", "sox", "tensorboardx", "tqdm", "traitlets (>=5.3.0)", "ttsfrd (>=0.1.2)", "unidecode", "wcwidth (>=0.2.5)"] +cv = ["PyMCubes", "accelerate", "albumentations (>=1.0.3)", "av (>=9.2.0)", "bmt-clipit (>=1.0)", "chumpy", "clip (>=1.0)", "control-ldm", "ddpm-guided-diffusion", "diffusers", "easydict", "easyrobust", "edit-distance", "face-alignment (>=1.3.5)", "fairscale (>=0.4.1)", "fastai (>=1.0.51)", "ffmpeg (>=1.4)", "ffmpeg-python (>=0.2.0)", "ftfy", "fvcore", "imageio (>=2.9.0)", "imageio-ffmpeg (>=0.4.2)", "imgaug (>=0.4.0)", "kornia (>=0.5.0)", "lap", "lmdb", "lpips", "ml-collections", "mmcls (>=0.21.0)", "mmdet (>=2.25.0,<=2.28.2)", "mmdet3d (==1.0.0a1)", "mmsegmentation (<=0.30.0)", "moviepy (>=1.0.3)", "nerfacc (==0.2.2)", "networkx", "numba", "omegaconf", "onnx", "onnxruntime (>=1.10)", "onnxsim", "open-clip-torch (>=2.7.0)", "opencv-python", "paint-ldm", "pandas", "panopticapi", "plyfile (>=0.7.4)", "psutil", "pyclipper", "pytorch-lightning", "regex", "scikit-image (>=0.19.3,<0.20.0)", "scikit-learn (>=0.20.1)", "shapely", "shotdetect-scenedetect-lgss (>=0.0.4)", "smplx", "tensorflow-estimator (>=1.15.1)", "tf-slim", "thop", "timm (>=0.4.9)", "torch-scatter", "torchmetrics (>=0.6.2)", "torchsummary (>=1.5.1)", "torchvision", "tqdm", "transformers (>=4.26.0)", "trimesh", "ujson", "utils", "videofeatures-clipit (>=1.0)", "yacs"] +multi-modal = ["accelerate", "cloudpickle", "decord (>=0.6.0)", "diffusers (>=0.25.0)", "fairseq", "ftfy (>=6.0.3)", "librosa (==0.10.1)", "opencv-python", "pycocoevalcap (>=1.2)", "pycocotools (>=2.0.4)", "pydot", "pytorch-lightning (<=1.7.7)", "rapidfuzz", "rouge-score (<=0.0.4)", "sacrebleu", "safetensors", "soundfile", "taming-transformers-rom1504", "timm", "tokenizers", "torchvision", "transformers (>=4.27.1)", "unicodedata2", "zhconv"] +nlp = ["boto3", "embeddings", "filelock", "ftfy", "jieba (>=0.42.1)", "matplotlib", "megatron-util", "nltk", "pandas", "protobuf (>=3.19.0,<3.21.0)", "pythainlp", "pyvi", "regex", "rouge", "sacremoses (>=0.0.41)", "scikit-learn", "sentencepiece", "seqeval", "spacy (>=2.3.5)", "stanza", "subword-nmt (>=0.3.8)", "termcolor", "tokenizers", "transformers (>=4.12.0)", "zhconv"] +science = ["biopython", "iopath", "ipdb", "lmdb", "ml-collections", "scipy", "tensorboardX", "tokenizers"] + +[[package]] +name = "mpmath" +version = "1.3.0" +description = "Python library for arbitrary-precision floating-point arithmetic" +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "mpmath-1.3.0-py3-none-any.whl", 
hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c"}, + {file = "mpmath-1.3.0.tar.gz", hash = "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f"}, +] + +[package.extras] +develop = ["codecov", "pycodestyle", "pytest (>=4.6)", "pytest-cov", "wheel"] +docs = ["sphinx"] +gmpy = ["gmpy2 (>=2.1.0a4) ; platform_python_implementation != \"PyPy\""] +tests = ["pytest (>=4.6)"] + +[[package]] +name = "multidict" +version = "6.1.0" +description = "multidict implementation" +optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "python_version < \"3.10\"" +files = [ + {file = "multidict-6.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3380252550e372e8511d49481bd836264c009adb826b23fefcc5dd3c69692f60"}, + {file = "multidict-6.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:99f826cbf970077383d7de805c0681799491cb939c25450b9b5b3ced03ca99f1"}, + {file = "multidict-6.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a114d03b938376557927ab23f1e950827c3b893ccb94b62fd95d430fd0e5cf53"}, + {file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b1c416351ee6271b2f49b56ad7f308072f6f44b37118d69c2cad94f3fa8a40d5"}, + {file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6b5d83030255983181005e6cfbac1617ce9746b219bc2aad52201ad121226581"}, + {file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3e97b5e938051226dc025ec80980c285b053ffb1e25a3db2a3aa3bc046bf7f56"}, + {file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d618649d4e70ac6efcbba75be98b26ef5078faad23592f9b51ca492953012429"}, + {file = "multidict-6.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:10524ebd769727ac77ef2278390fb0068d83f3acb7773792a5080f2b0abf7748"}, + {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:ff3827aef427c89a25cc96ded1759271a93603aba9fb977a6d264648ebf989db"}, + {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:06809f4f0f7ab7ea2cabf9caca7d79c22c0758b58a71f9d32943ae13c7ace056"}, + {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:f179dee3b863ab1c59580ff60f9d99f632f34ccb38bf67a33ec6b3ecadd0fd76"}, + {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:aaed8b0562be4a0876ee3b6946f6869b7bcdb571a5d1496683505944e268b160"}, + {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3c8b88a2ccf5493b6c8da9076fb151ba106960a2df90c2633f342f120751a9e7"}, + {file = "multidict-6.1.0-cp310-cp310-win32.whl", hash = "sha256:4a9cb68166a34117d6646c0023c7b759bf197bee5ad4272f420a0141d7eb03a0"}, + {file = "multidict-6.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:20b9b5fbe0b88d0bdef2012ef7dee867f874b72528cf1d08f1d59b0e3850129d"}, + {file = "multidict-6.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:3efe2c2cb5763f2f1b275ad2bf7a287d3f7ebbef35648a9726e3b69284a4f3d6"}, + {file = "multidict-6.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c7053d3b0353a8b9de430a4f4b4268ac9a4fb3481af37dfe49825bf45ca24156"}, + {file = "multidict-6.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:27e5fc84ccef8dfaabb09d82b7d179c7cf1a3fbc8a966f8274fcb4ab2eb4cadb"}, + {file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:0e2b90b43e696f25c62656389d32236e049568b39320e2735d51f08fd362761b"}, + {file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d83a047959d38a7ff552ff94be767b7fd79b831ad1cd9920662db05fec24fe72"}, + {file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d1a9dd711d0877a1ece3d2e4fea11a8e75741ca21954c919406b44e7cf971304"}, + {file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec2abea24d98246b94913b76a125e855eb5c434f7c46546046372fe60f666351"}, + {file = "multidict-6.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4867cafcbc6585e4b678876c489b9273b13e9fff9f6d6d66add5e15d11d926cb"}, + {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:5b48204e8d955c47c55b72779802b219a39acc3ee3d0116d5080c388970b76e3"}, + {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:d8fff389528cad1618fb4b26b95550327495462cd745d879a8c7c2115248e399"}, + {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:a7a9541cd308eed5e30318430a9c74d2132e9a8cb46b901326272d780bf2d423"}, + {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:da1758c76f50c39a2efd5e9859ce7d776317eb1dd34317c8152ac9251fc574a3"}, + {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c943a53e9186688b45b323602298ab727d8865d8c9ee0b17f8d62d14b56f0753"}, + {file = "multidict-6.1.0-cp311-cp311-win32.whl", hash = "sha256:90f8717cb649eea3504091e640a1b8568faad18bd4b9fcd692853a04475a4b80"}, + {file = "multidict-6.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:82176036e65644a6cc5bd619f65f6f19781e8ec2e5330f51aa9ada7504cc1926"}, + {file = "multidict-6.1.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:b04772ed465fa3cc947db808fa306d79b43e896beb677a56fb2347ca1a49c1fa"}, + {file = "multidict-6.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6180c0ae073bddeb5a97a38c03f30c233e0a4d39cd86166251617d1bbd0af436"}, + {file = "multidict-6.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:071120490b47aa997cca00666923a83f02c7fbb44f71cf7f136df753f7fa8761"}, + {file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50b3a2710631848991d0bf7de077502e8994c804bb805aeb2925a981de58ec2e"}, + {file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b58c621844d55e71c1b7f7c498ce5aa6985d743a1a59034c57a905b3f153c1ef"}, + {file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55b6d90641869892caa9ca42ff913f7ff1c5ece06474fbd32fb2cf6834726c95"}, + {file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b820514bfc0b98a30e3d85462084779900347e4d49267f747ff54060cc33925"}, + {file = "multidict-6.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:10a9b09aba0c5b48c53761b7c720aaaf7cf236d5fe394cd399c7ba662d5f9966"}, + {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1e16bf3e5fc9f44632affb159d30a437bfe286ce9e02754759be5536b169b305"}, + {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:76f364861c3bfc98cbbcbd402d83454ed9e01a5224bb3a28bf70002a230f73e2"}, + {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = 
"sha256:820c661588bd01a0aa62a1283f20d2be4281b086f80dad9e955e690c75fb54a2"}, + {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:0e5f362e895bc5b9e67fe6e4ded2492d8124bdf817827f33c5b46c2fe3ffaca6"}, + {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3ec660d19bbc671e3a6443325f07263be452c453ac9e512f5eb935e7d4ac28b3"}, + {file = "multidict-6.1.0-cp312-cp312-win32.whl", hash = "sha256:58130ecf8f7b8112cdb841486404f1282b9c86ccb30d3519faf301b2e5659133"}, + {file = "multidict-6.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:188215fc0aafb8e03341995e7c4797860181562380f81ed0a87ff455b70bf1f1"}, + {file = "multidict-6.1.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:d569388c381b24671589335a3be6e1d45546c2988c2ebe30fdcada8457a31008"}, + {file = "multidict-6.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:052e10d2d37810b99cc170b785945421141bf7bb7d2f8799d431e7db229c385f"}, + {file = "multidict-6.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f90c822a402cb865e396a504f9fc8173ef34212a342d92e362ca498cad308e28"}, + {file = "multidict-6.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b225d95519a5bf73860323e633a664b0d85ad3d5bede6d30d95b35d4dfe8805b"}, + {file = "multidict-6.1.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:23bfd518810af7de1116313ebd9092cb9aa629beb12f6ed631ad53356ed6b86c"}, + {file = "multidict-6.1.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c09fcfdccdd0b57867577b719c69e347a436b86cd83747f179dbf0cc0d4c1f3"}, + {file = "multidict-6.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf6bea52ec97e95560af5ae576bdac3aa3aae0b6758c6efa115236d9e07dae44"}, + {file = "multidict-6.1.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57feec87371dbb3520da6192213c7d6fc892d5589a93db548331954de8248fd2"}, + {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0c3f390dc53279cbc8ba976e5f8035eab997829066756d811616b652b00a23a3"}, + {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:59bfeae4b25ec05b34f1956eaa1cb38032282cd4dfabc5056d0a1ec4d696d3aa"}, + {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:b2f59caeaf7632cc633b5cf6fc449372b83bbdf0da4ae04d5be36118e46cc0aa"}, + {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:37bb93b2178e02b7b618893990941900fd25b6b9ac0fa49931a40aecdf083fe4"}, + {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4e9f48f58c2c523d5a06faea47866cd35b32655c46b443f163d08c6d0ddb17d6"}, + {file = "multidict-6.1.0-cp313-cp313-win32.whl", hash = "sha256:3a37ffb35399029b45c6cc33640a92bef403c9fd388acce75cdc88f58bd19a81"}, + {file = "multidict-6.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:e9aa71e15d9d9beaad2c6b9319edcdc0a49a43ef5c0a4c8265ca9ee7d6c67774"}, + {file = "multidict-6.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:db7457bac39421addd0c8449933ac32d8042aae84a14911a757ae6ca3eef1392"}, + {file = "multidict-6.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d094ddec350a2fb899fec68d8353c78233debde9b7d8b4beeafa70825f1c281a"}, + {file = "multidict-6.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5845c1fd4866bb5dd3125d89b90e57ed3138241540897de748cdf19de8a2fca2"}, + {file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:9079dfc6a70abe341f521f78405b8949f96db48da98aeb43f9907f342f627cdc"}, + {file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3914f5aaa0f36d5d60e8ece6a308ee1c9784cd75ec8151062614657a114c4478"}, + {file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c08be4f460903e5a9d0f76818db3250f12e9c344e79314d1d570fc69d7f4eae4"}, + {file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d093be959277cb7dee84b801eb1af388b6ad3ca6a6b6bf1ed7585895789d027d"}, + {file = "multidict-6.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3702ea6872c5a2a4eeefa6ffd36b042e9773f05b1f37ae3ef7264b1163c2dcf6"}, + {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:2090f6a85cafc5b2db085124d752757c9d251548cedabe9bd31afe6363e0aff2"}, + {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:f67f217af4b1ff66c68a87318012de788dd95fcfeb24cc889011f4e1c7454dfd"}, + {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:189f652a87e876098bbc67b4da1049afb5f5dfbaa310dd67c594b01c10388db6"}, + {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:6bb5992037f7a9eff7991ebe4273ea7f51f1c1c511e6a2ce511d0e7bdb754492"}, + {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:ac10f4c2b9e770c4e393876e35a7046879d195cd123b4f116d299d442b335bcd"}, + {file = "multidict-6.1.0-cp38-cp38-win32.whl", hash = "sha256:e27bbb6d14416713a8bd7aaa1313c0fc8d44ee48d74497a0ff4c3a1b6ccb5167"}, + {file = "multidict-6.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:22f3105d4fb15c8f57ff3959a58fcab6ce36814486500cd7485651230ad4d4ef"}, + {file = "multidict-6.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:4e18b656c5e844539d506a0a06432274d7bd52a7487e6828c63a63d69185626c"}, + {file = "multidict-6.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a185f876e69897a6f3325c3f19f26a297fa058c5e456bfcff8015e9a27e83ae1"}, + {file = "multidict-6.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ab7c4ceb38d91570a650dba194e1ca87c2b543488fe9309b4212694174fd539c"}, + {file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e617fb6b0b6953fffd762669610c1c4ffd05632c138d61ac7e14ad187870669c"}, + {file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:16e5f4bf4e603eb1fdd5d8180f1a25f30056f22e55ce51fb3d6ad4ab29f7d96f"}, + {file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f4c035da3f544b1882bac24115f3e2e8760f10a0107614fc9839fd232200b875"}, + {file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:957cf8e4b6e123a9eea554fa7ebc85674674b713551de587eb318a2df3e00255"}, + {file = "multidict-6.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:483a6aea59cb89904e1ceabd2b47368b5600fb7de78a6e4a2c2987b2d256cf30"}, + {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:87701f25a2352e5bf7454caa64757642734da9f6b11384c1f9d1a8e699758057"}, + {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:682b987361e5fd7a139ed565e30d81fd81e9629acc7d925a205366877d8c8657"}, + {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ce2186a7df133a9c895dea3331ddc5ddad42cdd0d1ea2f0a51e5d161e4762f28"}, + {file = 
"multidict-6.1.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:9f636b730f7e8cb19feb87094949ba54ee5357440b9658b2a32a5ce4bce53972"}, + {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:73eae06aa53af2ea5270cc066dcaf02cc60d2994bbb2c4ef5764949257d10f43"}, + {file = "multidict-6.1.0-cp39-cp39-win32.whl", hash = "sha256:1ca0083e80e791cffc6efce7660ad24af66c8d4079d2a750b29001b53ff59ada"}, + {file = "multidict-6.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:aa466da5b15ccea564bdab9c89175c762bc12825f4659c11227f515cee76fa4a"}, + {file = "multidict-6.1.0-py3-none-any.whl", hash = "sha256:48e171e52d1c4d33888e529b999e5900356b9ae588c2f09a52dcefb158b27506"}, + {file = "multidict-6.1.0.tar.gz", hash = "sha256:22ae2ebf9b0c69d206c003e2f6a914ea33f0a932d4aa16f236afc049d9958f4a"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.1.0", markers = "python_version < \"3.11\""} + +[[package]] +name = "multidict" +version = "6.5.1" +description = "multidict implementation" +optional = false +python-versions = ">=3.9" +groups = ["main"] +markers = "python_version >= \"3.10\"" +files = [ + {file = "multidict-6.5.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7b7d75cb5b90fa55700edbbdca12cd31f6b19c919e98712933c7a1c3c6c71b73"}, + {file = "multidict-6.5.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ad32e43e028276612bf5bab762677e7d131d2df00106b53de2efb2b8a28d5bce"}, + {file = "multidict-6.5.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0499cbc67c1b02ba333781798560c5b1e7cd03e9273b678c92c6de1b1657fac9"}, + {file = "multidict-6.5.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3c78fc6bc1dd7a139dab7ee9046f79a2082dce9360e3899b762615d564e2e857"}, + {file = "multidict-6.5.1-cp310-cp310-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:f369d6619b24da4df4a02455fea8641fe8324fc0100a3e0dcebc5bf55fa903f3"}, + {file = "multidict-6.5.1-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:719af50a44ce9cf9ab15d829bf8cf146de486b4816284c17c3c9b9c9735abb8f"}, + {file = "multidict-6.5.1-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:199a0a9b3de8bbeb6881460d32b857dc7abec94448aeb6d607c336628c53580a"}, + {file = "multidict-6.5.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fe09318a28b00c6f43180d0d889df1535e98fb2d93d25955d46945f8d5410d87"}, + {file = "multidict-6.5.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:ab94923ae54385ed480e4ab19f10269ee60f3eabd0b35e2a5d1ba6dbf3b0cc27"}, + {file = "multidict-6.5.1-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:de2b253a3a90e1fa55eef5f9b3146bb5c722bd3400747112c9963404a2f5b9cf"}, + {file = "multidict-6.5.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:b3bd88c1bc1f749db6a1e1f01696c3498bc25596136eceebb45766d24a320b27"}, + {file = "multidict-6.5.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:0ce8f0ea49e8f54203f7d80e083a7aa017dbcb6f2d76d674273e25144c8aa3d7"}, + {file = "multidict-6.5.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:dc62c8ac1b73ec704ed1a05be0267358fd5c99d1952f30448db1637336635cf8"}, + {file = "multidict-6.5.1-cp310-cp310-win32.whl", hash = "sha256:7a365a579fb3e067943d0278474e14c2244c252f460b401ccbf49f962e7b70fa"}, + {file = "multidict-6.5.1-cp310-cp310-win_amd64.whl", hash = "sha256:4b299a2ffed33ad0733a9d47805b538d59465f8439bfea44df542cfb285c4db2"}, + 
{file = "multidict-6.5.1-cp310-cp310-win_arm64.whl", hash = "sha256:ed98ac527278372251fbc8f5c6c41bdf64ded1db0e6e86f9b9622744306060f6"}, + {file = "multidict-6.5.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:153d7ff738d9b67b94418b112dc5a662d89d2fc26846a9e942f039089048c804"}, + {file = "multidict-6.5.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1d784c0a1974f00d87f632d0fb6b1078baf7e15d2d2d1408af92f54d120f136e"}, + {file = "multidict-6.5.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dedf667cded1cdac5bfd3f3c2ff30010f484faccae4e871cc8a9316d2dc27363"}, + {file = "multidict-6.5.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7cbf407313236a79ce9b8af11808c29756cfb9c9a49a7f24bb1324537eec174b"}, + {file = "multidict-6.5.1-cp311-cp311-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:2bf0068fe9abb0ebed1436a4e415117386951cf598eb8146ded4baf8e1ff6d1e"}, + {file = "multidict-6.5.1-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:195882f2f6272dacc88194ecd4de3608ad0ee29b161e541403b781a5f5dd346f"}, + {file = "multidict-6.5.1-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5776f9d2c3a1053f022f744af5f467c2f65b40d4cc00082bcf70e8c462c7dbad"}, + {file = "multidict-6.5.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8a266373c604e49552d295d9f8ec4fd59bd364f2dd73eb18e7d36d5533b88f45"}, + {file = "multidict-6.5.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:79101d58094419b6e8d07e24946eba440136b9095590271cd6ccc4a90674a57d"}, + {file = "multidict-6.5.1-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:62eb76be8c20d9017a82b74965db93ddcf472b929b6b2b78c56972c73bacf2e4"}, + {file = "multidict-6.5.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:70c742357dd6207be30922207f8d59c91e2776ddbefa23830c55c09020e59f8a"}, + {file = "multidict-6.5.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:29eff1c9a905e298e9cd29f856f77485e58e59355f0ee323ac748203e002bbd3"}, + {file = "multidict-6.5.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:090e0b37fde199b58ea050c472c21dc8a3fbf285f42b862fe1ff02aab8942239"}, + {file = "multidict-6.5.1-cp311-cp311-win32.whl", hash = "sha256:6037beca8cb481307fb586ee0b73fae976a3e00d8f6ad7eb8af94a878a4893f0"}, + {file = "multidict-6.5.1-cp311-cp311-win_amd64.whl", hash = "sha256:b632c1e4a2ff0bb4c1367d6c23871aa95dbd616bf4a847034732a142bb6eea94"}, + {file = "multidict-6.5.1-cp311-cp311-win_arm64.whl", hash = "sha256:2ec3aa63f0c668f591d43195f8e555f803826dee34208c29ade9d63355f9e095"}, + {file = "multidict-6.5.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:48f95fe064f63d9601ef7a3dce2fc2a437d5fcc11bca960bc8be720330b13b6a"}, + {file = "multidict-6.5.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7b7b6e1ce9b61f721417c68eeeb37599b769f3b631e6b25c21f50f8f619420b9"}, + {file = "multidict-6.5.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8b83b055889bda09fc866c0a652cdb6c36eeeafc2858259c9a7171fe82df5773"}, + {file = "multidict-6.5.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b7bd4d655dc460c7aebb73b58ed1c074e85f7286105b012556cf0f25c6d1dba3"}, + {file = "multidict-6.5.1-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:aa6dcf25ced31cdce10f004506dbc26129f28a911b32ed10e54453a0842a6173"}, + {file = 
"multidict-6.5.1-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:059fb556c3e6ce1a168496f92ef139ad839a47f898eaa512b1d43e5e05d78c6b"}, + {file = "multidict-6.5.1-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f97680c839dd9fa208e9584b1c2a5f1224bd01d31961f7f7d94984408c4a6b9e"}, + {file = "multidict-6.5.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7710c716243525cc05cd038c6e09f1807ee0fef2510a6e484450712c389c8d7f"}, + {file = "multidict-6.5.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:83eb172b4856ffff2814bdcf9c7792c0439302faab1b31376817b067b26cd8f5"}, + {file = "multidict-6.5.1-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:562d4714fa43f6ebc043a657535e4575e7d6141a818c9b3055f0868d29a1a41b"}, + {file = "multidict-6.5.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:2d7def2fc47695c46a427b8f298fb5ace03d635c1fb17f30d6192c9a8fb69e70"}, + {file = "multidict-6.5.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:77bc8ab5c6bfe696eff564824e73a451fdeca22f3b960261750836cee02bcbfa"}, + {file = "multidict-6.5.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:9eec51891d3c210948ead894ec1483d48748abec08db5ce9af52cc13fef37aee"}, + {file = "multidict-6.5.1-cp312-cp312-win32.whl", hash = "sha256:189f0c2bd1c0ae5509e453707d0e187e030c9e873a0116d1f32d1c870d0fc347"}, + {file = "multidict-6.5.1-cp312-cp312-win_amd64.whl", hash = "sha256:e81f23b4b6f2a588f15d5cb554b2d8b482bb6044223d64b86bc7079cae9ebaad"}, + {file = "multidict-6.5.1-cp312-cp312-win_arm64.whl", hash = "sha256:79d13e06d5241f9c8479dfeaf0f7cce8f453a4a302c9a0b1fa9b1a6869ff7757"}, + {file = "multidict-6.5.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:98011312f36d1e496f15454a95578d1212bc2ffc25650a8484752b06d304fd9b"}, + {file = "multidict-6.5.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:bae589fb902b47bd94e6f539b34eefe55a1736099f616f614ec1544a43f95b05"}, + {file = "multidict-6.5.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:6eb3bf26cd94eb306e4bc776d0964cc67a7967e4ad9299309f0ff5beec3c62be"}, + {file = "multidict-6.5.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e5e1a5a99c72d1531501406fcc06b6bf699ebd079dacd6807bb43fc0ff260e5c"}, + {file = "multidict-6.5.1-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:38755bcba18720cb2338bea23a5afcff234445ee75fa11518f6130e22f2ab970"}, + {file = "multidict-6.5.1-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f42fef9bcba3c32fd4e4a23c5757fc807d218b249573aaffa8634879f95feb73"}, + {file = "multidict-6.5.1-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:071b962f4cc87469cda90c7cc1c077b76496878b39851d7417a3d994e27fe2c6"}, + {file = "multidict-6.5.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:627ba4b7ce7c0115981f0fd91921f5d101dfb9972622178aeef84ccce1c2bbf3"}, + {file = "multidict-6.5.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:05dcaed3e5e54f0d0f99a39762b0195274b75016cbf246f600900305581cf1a2"}, + {file = "multidict-6.5.1-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:11f5ecf3e741a18c578d118ad257c5588ca33cc7c46d51c0487d7ae76f072c32"}, + {file = "multidict-6.5.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = 
"sha256:b948eb625411c20b15088fca862c51a39140b9cf7875b5fb47a72bb249fa2f42"}, + {file = "multidict-6.5.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:fc993a96dfc8300befd03d03df46efdb1d8d5a46911b014e956a4443035f470d"}, + {file = "multidict-6.5.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ee2d333380f22d35a56c6461f4579cfe186e143cd0b010b9524ac027de2a34cd"}, + {file = "multidict-6.5.1-cp313-cp313-win32.whl", hash = "sha256:5891e3327e6a426ddd443c87339b967c84feb8c022dd425e0c025fa0fcd71e68"}, + {file = "multidict-6.5.1-cp313-cp313-win_amd64.whl", hash = "sha256:fcdaa72261bff25fad93e7cb9bd7112bd4bac209148e698e380426489d8ed8a9"}, + {file = "multidict-6.5.1-cp313-cp313-win_arm64.whl", hash = "sha256:84292145303f354a35558e601c665cdf87059d87b12777417e2e57ba3eb98903"}, + {file = "multidict-6.5.1-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:f8316e58db799a1972afbc46770dfaaf20b0847003ab80de6fcb9861194faa3f"}, + {file = "multidict-6.5.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:d3468f0db187aca59eb56e0aa9f7c8c5427bcb844ad1c86557b4886aeb4484d8"}, + {file = "multidict-6.5.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:228533a5f99f1248cd79f6470779c424d63bc3e10d47c82511c65cc294458445"}, + {file = "multidict-6.5.1-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:527076fdf5854901b1246c589af9a8a18b4a308375acb0020b585f696a10c794"}, + {file = "multidict-6.5.1-cp313-cp313t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:9a17a17bad5c22f43e6a6b285dd9c16b1e8f8428202cd9bc22adaac68d0bbfed"}, + {file = "multidict-6.5.1-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:efd1951edab4a6cb65108d411867811f2b283f4b972337fb4269e40142f7f6a6"}, + {file = "multidict-6.5.1-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:c07d5f38b39acb4f8f61a7aa4166d140ed628245ff0441630df15340532e3b3c"}, + {file = "multidict-6.5.1-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8a6605dc74cd333be279e1fcb568ea24f7bdf1cf09f83a77360ce4dd32d67f14"}, + {file = "multidict-6.5.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:8d64e30ae9ba66ce303a567548a06d64455d97c5dff7052fe428d154274d7174"}, + {file = "multidict-6.5.1-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:2fb5dde79a7f6d98ac5e26a4c9de77ccd2c5224a7ce89aeac6d99df7bbe06464"}, + {file = "multidict-6.5.1-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:8a0d22e8b07cf620e9aeb1582340d00f0031e6a1f3e39d9c2dcbefa8691443b4"}, + {file = "multidict-6.5.1-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:0120ed5cff2082c7a0ed62a8f80f4f6ac266010c722381816462f279bfa19487"}, + {file = "multidict-6.5.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:3dea06ba27401c4b54317aa04791182dc9295e7aa623732dd459071a0e0f65db"}, + {file = "multidict-6.5.1-cp313-cp313t-win32.whl", hash = "sha256:93b21be44f3cfee3be68ed5cd8848a3c0420d76dbd12d74f7776bde6b29e5f33"}, + {file = "multidict-6.5.1-cp313-cp313t-win_amd64.whl", hash = "sha256:c5c18f8646a520cc34d00f65f9f6f77782b8a8c59fd8de10713e0de7f470b5d0"}, + {file = "multidict-6.5.1-cp313-cp313t-win_arm64.whl", hash = "sha256:eb27128141474a1d545f0531b496c7c2f1c4beff50cb5a828f36eb62fef16c67"}, + {file = "multidict-6.5.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:279a37cb9d04097bf1c6308d7495cb4dfbd8fb538301bfe464266b045dfeb1cd"}, + {file = 
"multidict-6.5.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8e63ac6adc668cfe52e29c00afe33c3b8dbec8e37b529aa83bf31ba4bad0c509"}, + {file = "multidict-6.5.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:36b138c6ec3aedaa975653ea90099efb22042bab31727dd4cd2921a64de46b25"}, + {file = "multidict-6.5.1-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:576a1887a5c5becbe4fb484d0bdf6ed8ec89e9c11770f8f3214fd127ba137b8b"}, + {file = "multidict-6.5.1-cp39-cp39-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:a0f1890f9a05d038720a7c3b5d82467534495bcb6bbda929f6f0914977cc56d1"}, + {file = "multidict-6.5.1-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:5125a9faed98738d7d6e23650bd8af70abb95628d011f57f70a4d8f349e6d073"}, + {file = "multidict-6.5.1-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:e09d7852100bcc3e466e63478ee18c68cc4d2ca2a978f29b90d5e2ea814f7b3e"}, + {file = "multidict-6.5.1-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e615032b684a1d6faffe41d64cd896801bd3f2c1b642355e9b5d11fd8d40223e"}, + {file = "multidict-6.5.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:70c0e51d55f9bc5e97de950c3b3e88f501b3ca2b3894f231f3957dd3985b4d54"}, + {file = "multidict-6.5.1-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:a6523258f2eb24c91995ae64172c19cd73bacd5a7f2b0733676966c527ab08f8"}, + {file = "multidict-6.5.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:5e8fefd7c062b0657af2480d789dcb347450d17c7bd20b02303c25f1f59a33a7"}, + {file = "multidict-6.5.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:3d749b10cc6acb2c0814df881910ffd8d8ab1ec54493585579b4a75f89fe86d6"}, + {file = "multidict-6.5.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:22b47f7e76ebea0b802df9ed08165b1e6ab52b140c7180c3e740e6205b3781b3"}, + {file = "multidict-6.5.1-cp39-cp39-win32.whl", hash = "sha256:cc80c7e8f297484c4511e887c244adec9a7ed3a76826cb8dbc6183b717a37d1f"}, + {file = "multidict-6.5.1-cp39-cp39-win_amd64.whl", hash = "sha256:253e5c41fcc02e2956ab276b5a702f3972db1e87c080be55e87ca31a2f4f8012"}, + {file = "multidict-6.5.1-cp39-cp39-win_arm64.whl", hash = "sha256:a9e15dfe441aec31e0fa78f497aca83f0ad992ca58782cbaba8220e5a87608fc"}, + {file = "multidict-6.5.1-py3-none-any.whl", hash = "sha256:895354f4a38f53a1df2cc3fa2223fa714cff2b079a9f018a76cad35e7f0f044c"}, + {file = "multidict-6.5.1.tar.gz", hash = "sha256:a835ea8103f4723915d7d621529c80ef48db48ae0c818afcabe0f95aa1febc3a"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.1.0", markers = "python_version < \"3.11\""} + +[[package]] +name = "multiprocess" +version = "0.70.16" +description = "better multiprocessing and multithreading in Python" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "multiprocess-0.70.16-pp310-pypy310_pp73-macosx_10_13_x86_64.whl", hash = "sha256:476887be10e2f59ff183c006af746cb6f1fd0eadcfd4ef49e605cbe2659920ee"}, + {file = "multiprocess-0.70.16-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:d951bed82c8f73929ac82c61f01a7b5ce8f3e5ef40f5b52553b4f547ce2b08ec"}, + {file = "multiprocess-0.70.16-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:37b55f71c07e2d741374998c043b9520b626a8dddc8b3129222ca4f1a06ef67a"}, + {file = "multiprocess-0.70.16-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = 
"sha256:ba8c31889abf4511c7308a8c52bb4a30b9d590e7f58523302ba00237702ca054"}, + {file = "multiprocess-0.70.16-pp39-pypy39_pp73-macosx_10_13_x86_64.whl", hash = "sha256:0dfd078c306e08d46d7a8d06fb120313d87aa43af60d66da43ffff40b44d2f41"}, + {file = "multiprocess-0.70.16-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e7b9d0f307cd9bd50851afaac0dba2cb6c44449efff697df7c7645f7d3f2be3a"}, + {file = "multiprocess-0.70.16-py310-none-any.whl", hash = "sha256:c4a9944c67bd49f823687463660a2d6daae94c289adff97e0f9d696ba6371d02"}, + {file = "multiprocess-0.70.16-py311-none-any.whl", hash = "sha256:af4cabb0dac72abfb1e794fa7855c325fd2b55a10a44628a3c1ad3311c04127a"}, + {file = "multiprocess-0.70.16-py312-none-any.whl", hash = "sha256:fc0544c531920dde3b00c29863377f87e1632601092ea2daca74e4beb40faa2e"}, + {file = "multiprocess-0.70.16-py38-none-any.whl", hash = "sha256:a71d82033454891091a226dfc319d0cfa8019a4e888ef9ca910372a446de4435"}, + {file = "multiprocess-0.70.16-py39-none-any.whl", hash = "sha256:a0bafd3ae1b732eac64be2e72038231c1ba97724b60b09400d68f229fcc2fbf3"}, + {file = "multiprocess-0.70.16.tar.gz", hash = "sha256:161af703d4652a0e1410be6abccecde4a7ddffd19341be0a7011b94aeb171ac1"}, +] + +[package.dependencies] +dill = ">=0.3.8" + +[[package]] +name = "networkx" +version = "3.1" +description = "Python package for creating and manipulating graphs and networks" +optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "python_version < \"3.10\"" +files = [ + {file = "networkx-3.1-py3-none-any.whl", hash = "sha256:4f33f68cb2afcf86f28a45f43efc27a9386b535d567d2127f8f61d51dec58d36"}, + {file = "networkx-3.1.tar.gz", hash = "sha256:de346335408f84de0eada6ff9fafafff9bcda11f0a0dfaa931133debb146ab61"}, +] + +[package.extras] +default = ["matplotlib (>=3.4)", "numpy (>=1.20)", "pandas (>=1.3)", "scipy (>=1.8)"] +developer = ["mypy (>=1.1)", "pre-commit (>=3.2)"] +doc = ["nb2plots (>=0.6)", "numpydoc (>=1.5)", "pillow (>=9.4)", "pydata-sphinx-theme (>=0.13)", "sphinx (>=6.1)", "sphinx-gallery (>=0.12)", "texext (>=0.6.7)"] +extra = ["lxml (>=4.6)", "pydot (>=1.4.2)", "pygraphviz (>=1.10)", "sympy (>=1.10)"] +test = ["codecov (>=2.1)", "pytest (>=7.2)", "pytest-cov (>=4.0)"] + +[[package]] +name = "networkx" +version = "3.4.2" +description = "Python package for creating and manipulating graphs and networks" +optional = false +python-versions = ">=3.10" +groups = ["main"] +markers = "python_version == \"3.10\"" +files = [ + {file = "networkx-3.4.2-py3-none-any.whl", hash = "sha256:df5d4365b724cf81b8c6a7312509d0c22386097011ad1abe274afd5e9d3bbc5f"}, + {file = "networkx-3.4.2.tar.gz", hash = "sha256:307c3669428c5362aab27c8a1260aa8f47c4e91d3891f48be0141738d8d053e1"}, +] + +[package.extras] +default = ["matplotlib (>=3.7)", "numpy (>=1.24)", "pandas (>=2.0)", "scipy (>=1.10,!=1.11.0,!=1.11.1)"] +developer = ["changelist (==0.5)", "mypy (>=1.1)", "pre-commit (>=3.2)", "rtoml"] +doc = ["intersphinx-registry", "myst-nb (>=1.1)", "numpydoc (>=1.8.0)", "pillow (>=9.4)", "pydata-sphinx-theme (>=0.15)", "sphinx (>=7.3)", "sphinx-gallery (>=0.16)", "texext (>=0.6.7)"] +example = ["cairocffi (>=1.7)", "contextily (>=1.6)", "igraph (>=0.11)", "momepy (>=0.7.2)", "osmnx (>=1.9)", "scikit-learn (>=1.5)", "seaborn (>=0.13)"] +extra = ["lxml (>=4.6)", "pydot (>=3.0.1)", "pygraphviz (>=1.14)", "sympy (>=1.10)"] +test = ["pytest (>=7.2)", "pytest-cov (>=4.0)"] + +[[package]] +name = "networkx" +version = "3.5" +description = "Python package for creating and manipulating graphs and networks" +optional = false 
+python-versions = ">=3.11" +groups = ["main"] +markers = "python_version >= \"3.11\"" +files = [ + {file = "networkx-3.5-py3-none-any.whl", hash = "sha256:0030d386a9a06dee3565298b4a734b68589749a544acbb6c412dc9e2489ec6ec"}, + {file = "networkx-3.5.tar.gz", hash = "sha256:d4c6f9cf81f52d69230866796b82afbccdec3db7ae4fbd1b65ea750feed50037"}, +] + +[package.extras] +default = ["matplotlib (>=3.8)", "numpy (>=1.25)", "pandas (>=2.0)", "scipy (>=1.11.2)"] +developer = ["mypy (>=1.15)", "pre-commit (>=4.1)"] +doc = ["intersphinx-registry", "myst-nb (>=1.1)", "numpydoc (>=1.8.0)", "pillow (>=10)", "pydata-sphinx-theme (>=0.16)", "sphinx (>=8.0)", "sphinx-gallery (>=0.18)", "texext (>=0.6.7)"] +example = ["cairocffi (>=1.7)", "contextily (>=1.6)", "igraph (>=0.11)", "momepy (>=0.7.2)", "osmnx (>=2.0.0)", "scikit-learn (>=1.5)", "seaborn (>=0.13)"] +extra = ["lxml (>=4.6)", "pydot (>=3.0.1)", "pygraphviz (>=1.14)", "sympy (>=1.10)"] +test = ["pytest (>=7.2)", "pytest-cov (>=4.0)", "pytest-xdist (>=3.0)"] +test-extras = ["pytest-mpl", "pytest-randomly"] + +[[package]] +name = "numpy" +version = "1.24.4" +description = "Fundamental package for array computing in Python" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "numpy-1.24.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c0bfb52d2169d58c1cdb8cc1f16989101639b34c7d3ce60ed70b19c63eba0b64"}, + {file = "numpy-1.24.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ed094d4f0c177b1b8e7aa9cba7d6ceed51c0e569a5318ac0ca9a090680a6a1b1"}, + {file = "numpy-1.24.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79fc682a374c4a8ed08b331bef9c5f582585d1048fa6d80bc6c35bc384eee9b4"}, + {file = "numpy-1.24.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ffe43c74893dbf38c2b0a1f5428760a1a9c98285553c89e12d70a96a7f3a4d6"}, + {file = "numpy-1.24.4-cp310-cp310-win32.whl", hash = "sha256:4c21decb6ea94057331e111a5bed9a79d335658c27ce2adb580fb4d54f2ad9bc"}, + {file = "numpy-1.24.4-cp310-cp310-win_amd64.whl", hash = "sha256:b4bea75e47d9586d31e892a7401f76e909712a0fd510f58f5337bea9572c571e"}, + {file = "numpy-1.24.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f136bab9c2cfd8da131132c2cf6cc27331dd6fae65f95f69dcd4ae3c3639c810"}, + {file = "numpy-1.24.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e2926dac25b313635e4d6cf4dc4e51c8c0ebfed60b801c799ffc4c32bf3d1254"}, + {file = "numpy-1.24.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:222e40d0e2548690405b0b3c7b21d1169117391c2e82c378467ef9ab4c8f0da7"}, + {file = "numpy-1.24.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7215847ce88a85ce39baf9e89070cb860c98fdddacbaa6c0da3ffb31b3350bd5"}, + {file = "numpy-1.24.4-cp311-cp311-win32.whl", hash = "sha256:4979217d7de511a8d57f4b4b5b2b965f707768440c17cb70fbf254c4b225238d"}, + {file = "numpy-1.24.4-cp311-cp311-win_amd64.whl", hash = "sha256:b7b1fc9864d7d39e28f41d089bfd6353cb5f27ecd9905348c24187a768c79694"}, + {file = "numpy-1.24.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1452241c290f3e2a312c137a9999cdbf63f78864d63c79039bda65ee86943f61"}, + {file = "numpy-1.24.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:04640dab83f7c6c85abf9cd729c5b65f1ebd0ccf9de90b270cd61935eef0197f"}, + {file = "numpy-1.24.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5425b114831d1e77e4b5d812b69d11d962e104095a5b9c3b641a218abcc050e"}, + {file = 
"numpy-1.24.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd80e219fd4c71fc3699fc1dadac5dcf4fd882bfc6f7ec53d30fa197b8ee22dc"}, + {file = "numpy-1.24.4-cp38-cp38-win32.whl", hash = "sha256:4602244f345453db537be5314d3983dbf5834a9701b7723ec28923e2889e0bb2"}, + {file = "numpy-1.24.4-cp38-cp38-win_amd64.whl", hash = "sha256:692f2e0f55794943c5bfff12b3f56f99af76f902fc47487bdfe97856de51a706"}, + {file = "numpy-1.24.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2541312fbf09977f3b3ad449c4e5f4bb55d0dbf79226d7724211acc905049400"}, + {file = "numpy-1.24.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9667575fb6d13c95f1b36aca12c5ee3356bf001b714fc354eb5465ce1609e62f"}, + {file = "numpy-1.24.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f3a86ed21e4f87050382c7bc96571755193c4c1392490744ac73d660e8f564a9"}, + {file = "numpy-1.24.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d11efb4dbecbdf22508d55e48d9c8384db795e1b7b51ea735289ff96613ff74d"}, + {file = "numpy-1.24.4-cp39-cp39-win32.whl", hash = "sha256:6620c0acd41dbcb368610bb2f4d83145674040025e5536954782467100aa8835"}, + {file = "numpy-1.24.4-cp39-cp39-win_amd64.whl", hash = "sha256:befe2bf740fd8373cf56149a5c23a0f601e82869598d41f8e188a0e9869926f8"}, + {file = "numpy-1.24.4-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:31f13e25b4e304632a4619d0e0777662c2ffea99fcae2029556b17d8ff958aef"}, + {file = "numpy-1.24.4-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95f7ac6540e95bc440ad77f56e520da5bf877f87dca58bd095288dce8940532a"}, + {file = "numpy-1.24.4-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:e98f220aa76ca2a977fe435f5b04d7b3470c0a2e6312907b37ba6068f26787f2"}, + {file = "numpy-1.24.4.tar.gz", hash = "sha256:80f5e3a4e498641401868df4208b74581206afbee7cf7b8329daae82676d9463"}, +] + +[[package]] +name = "nvidia-cublas-cu12" +version = "12.1.3.1" +description = "CUBLAS native runtime libraries" +optional = false +python-versions = ">=3" +groups = ["main"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" +files = [ + {file = "nvidia_cublas_cu12-12.1.3.1-py3-none-manylinux1_x86_64.whl", hash = "sha256:ee53ccca76a6fc08fb9701aa95b6ceb242cdaab118c3bb152af4e579af792728"}, + {file = "nvidia_cublas_cu12-12.1.3.1-py3-none-win_amd64.whl", hash = "sha256:2b964d60e8cf11b5e1073d179d85fa340c120e99b3067558f3cf98dd69d02906"}, +] + +[[package]] +name = "nvidia-cuda-cupti-cu12" +version = "12.1.105" +description = "CUDA profiling tools runtime libs." 
+optional = false +python-versions = ">=3" +groups = ["main"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" +files = [ + {file = "nvidia_cuda_cupti_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:e54fde3983165c624cb79254ae9818a456eb6e87a7fd4d56a2352c24ee542d7e"}, + {file = "nvidia_cuda_cupti_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:bea8236d13a0ac7190bd2919c3e8e6ce1e402104276e6f9694479e48bb0eb2a4"}, +] + +[[package]] +name = "nvidia-cuda-nvrtc-cu12" +version = "12.1.105" +description = "NVRTC native runtime libraries" +optional = false +python-versions = ">=3" +groups = ["main"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" +files = [ + {file = "nvidia_cuda_nvrtc_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:339b385f50c309763ca65456ec75e17bbefcbbf2893f462cb8b90584cd27a1c2"}, + {file = "nvidia_cuda_nvrtc_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:0a98a522d9ff138b96c010a65e145dc1b4850e9ecb75a0172371793752fd46ed"}, +] + +[[package]] +name = "nvidia-cuda-runtime-cu12" +version = "12.1.105" +description = "CUDA Runtime native Libraries" +optional = false +python-versions = ">=3" +groups = ["main"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" +files = [ + {file = "nvidia_cuda_runtime_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:6e258468ddf5796e25f1dc591a31029fa317d97a0a94ed93468fc86301d61e40"}, + {file = "nvidia_cuda_runtime_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:dfb46ef84d73fababab44cf03e3b83f80700d27ca300e537f85f636fac474344"}, +] + +[[package]] +name = "nvidia-cudnn-cu12" +version = "8.9.2.26" +description = "cuDNN runtime libraries" +optional = false +python-versions = ">=3" +groups = ["main"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" +files = [ + {file = "nvidia_cudnn_cu12-8.9.2.26-py3-none-manylinux1_x86_64.whl", hash = "sha256:5ccb288774fdfb07a7e7025ffec286971c06d8d7b4fb162525334616d7629ff9"}, +] + +[package.dependencies] +nvidia-cublas-cu12 = "*" + +[[package]] +name = "nvidia-cufft-cu12" +version = "11.0.2.54" +description = "CUFFT native runtime libraries" +optional = false +python-versions = ">=3" +groups = ["main"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" +files = [ + {file = "nvidia_cufft_cu12-11.0.2.54-py3-none-manylinux1_x86_64.whl", hash = "sha256:794e3948a1aa71fd817c3775866943936774d1c14e7628c74f6f7417224cdf56"}, + {file = "nvidia_cufft_cu12-11.0.2.54-py3-none-win_amd64.whl", hash = "sha256:d9ac353f78ff89951da4af698f80870b1534ed69993f10a4cf1d96f21357e253"}, +] + +[[package]] +name = "nvidia-curand-cu12" +version = "10.3.2.106" +description = "CURAND native runtime libraries" +optional = false +python-versions = ">=3" +groups = ["main"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" +files = [ + {file = "nvidia_curand_cu12-10.3.2.106-py3-none-manylinux1_x86_64.whl", hash = "sha256:9d264c5036dde4e64f1de8c50ae753237c12e0b1348738169cd0f8a536c0e1e0"}, + {file = "nvidia_curand_cu12-10.3.2.106-py3-none-win_amd64.whl", hash = "sha256:75b6b0c574c0037839121317e17fd01f8a69fd2ef8e25853d826fec30bdba74a"}, +] + +[[package]] +name = "nvidia-cusolver-cu12" +version = "11.4.5.107" +description = "CUDA solver native runtime libraries" +optional = false +python-versions = ">=3" +groups = ["main"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" +files = [ + {file = 
"nvidia_cusolver_cu12-11.4.5.107-py3-none-manylinux1_x86_64.whl", hash = "sha256:8a7ec542f0412294b15072fa7dab71d31334014a69f953004ea7a118206fe0dd"}, + {file = "nvidia_cusolver_cu12-11.4.5.107-py3-none-win_amd64.whl", hash = "sha256:74e0c3a24c78612192a74fcd90dd117f1cf21dea4822e66d89e8ea80e3cd2da5"}, +] + +[package.dependencies] +nvidia-cublas-cu12 = "*" +nvidia-cusparse-cu12 = "*" +nvidia-nvjitlink-cu12 = "*" + +[[package]] +name = "nvidia-cusparse-cu12" +version = "12.1.0.106" +description = "CUSPARSE native runtime libraries" +optional = false +python-versions = ">=3" +groups = ["main"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" +files = [ + {file = "nvidia_cusparse_cu12-12.1.0.106-py3-none-manylinux1_x86_64.whl", hash = "sha256:f3b50f42cf363f86ab21f720998517a659a48131e8d538dc02f8768237bd884c"}, + {file = "nvidia_cusparse_cu12-12.1.0.106-py3-none-win_amd64.whl", hash = "sha256:b798237e81b9719373e8fae8d4f091b70a0cf09d9d85c95a557e11df2d8e9a5a"}, +] + +[package.dependencies] +nvidia-nvjitlink-cu12 = "*" + +[[package]] +name = "nvidia-nccl-cu12" +version = "2.18.1" +description = "NVIDIA Collective Communication Library (NCCL) Runtime" +optional = false +python-versions = ">=3" +groups = ["main"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" +files = [ + {file = "nvidia_nccl_cu12-2.18.1-py3-none-manylinux1_x86_64.whl", hash = "sha256:1a6c4acefcbebfa6de320f412bf7866de856e786e0462326ba1bac40de0b5e71"}, +] + +[[package]] +name = "nvidia-nvjitlink-cu12" +version = "12.9.86" +description = "Nvidia JIT LTO Library" +optional = false +python-versions = ">=3" +groups = ["main"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" +files = [ + {file = "nvidia_nvjitlink_cu12-12.9.86-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl", hash = "sha256:e3f1171dbdc83c5932a45f0f4c99180a70de9bd2718c1ab77d14104f6d7147f9"}, + {file = "nvidia_nvjitlink_cu12-12.9.86-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:994a05ef08ef4b0b299829cde613a424382aff7efb08a7172c1fa616cc3af2ca"}, + {file = "nvidia_nvjitlink_cu12-12.9.86-py3-none-win_amd64.whl", hash = "sha256:cc6fcec260ca843c10e34c936921a1c426b351753587fdd638e8cff7b16bb9db"}, +] + +[[package]] +name = "nvidia-nvtx-cu12" +version = "12.1.105" +description = "NVIDIA Tools Extension" +optional = false +python-versions = ">=3" +groups = ["main"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" +files = [ + {file = "nvidia_nvtx_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:dc21cf308ca5691e7c04d962e213f8a4aa9bbfa23d95412f452254c2caeb09e5"}, + {file = "nvidia_nvtx_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:65f4d98982b31b60026e0e6de73fbdfc09d08a96f4656dd3665ca616a11e1e82"}, +] + +[[package]] +name = "oauthlib" +version = "3.3.1" +description = "A generic, spec-compliant, thorough implementation of the OAuth request-signing logic" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "oauthlib-3.3.1-py3-none-any.whl", hash = "sha256:88119c938d2b8fb88561af5f6ee0eec8cc8d552b7bb1f712743136eb7523b7a1"}, + {file = "oauthlib-3.3.1.tar.gz", hash = "sha256:0f0f8aa759826a193cf66c12ea1af1637f87b9b4622d46e866952bb022e538c9"}, +] + +[package.extras] +rsa = ["cryptography (>=3.0.0)"] +signals = ["blinker (>=1.4.0)"] +signedtoken = ["cryptography (>=3.0.0)", "pyjwt (>=2.0.0,<3)"] + +[[package]] +name = "onnxruntime" +version = "1.16.1" +description = "ONNX Runtime is a runtime 
accelerator for Machine Learning models" +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "onnxruntime-1.16.1-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:28b2c7f444b4119950b69370801cd66067f403d19cbaf2a444735d7c269cce4a"}, + {file = "onnxruntime-1.16.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c24e04f33e7899f6aebb03ed51e51d346c1f906b05c5569d58ac9a12d38a2f58"}, + {file = "onnxruntime-1.16.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9fa93b166f2d97063dc9f33c5118c5729a4a5dd5617296b6dbef42f9047b3e81"}, + {file = "onnxruntime-1.16.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:042dd9201b3016ee18f8f8bc4609baf11ff34ca1ff489c0a46bcd30919bf883d"}, + {file = "onnxruntime-1.16.1-cp310-cp310-win32.whl", hash = "sha256:c20aa0591f305012f1b21aad607ed96917c86ae7aede4a4dd95824b3d124ceb7"}, + {file = "onnxruntime-1.16.1-cp310-cp310-win_amd64.whl", hash = "sha256:5581873e578917bea76d6434ee7337e28195d03488dcf72d161d08e9398c6249"}, + {file = "onnxruntime-1.16.1-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:ef8c0c8abf5f309aa1caf35941380839dc5f7a2fa53da533be4a3f254993f120"}, + {file = "onnxruntime-1.16.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e680380bea35a137cbc3efd67a17486e96972901192ad3026ee79c8d8fe264f7"}, + {file = "onnxruntime-1.16.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e62cc38ce1a669013d0a596d984762dc9c67c56f60ecfeee0d5ad36da5863f6"}, + {file = "onnxruntime-1.16.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:025c7a4d57bd2e63b8a0f84ad3df53e419e3df1cc72d63184f2aae807b17c13c"}, + {file = "onnxruntime-1.16.1-cp311-cp311-win32.whl", hash = "sha256:9ad074057fa8d028df248b5668514088cb0937b6ac5954073b7fb9b2891ffc8c"}, + {file = "onnxruntime-1.16.1-cp311-cp311-win_amd64.whl", hash = "sha256:d5e43a3478bffc01f817ecf826de7b25a2ca1bca8547d70888594ab80a77ad24"}, + {file = "onnxruntime-1.16.1-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:3aef4d70b0930e29a8943eab248cd1565664458d3a62b2276bd11181f28fd0a3"}, + {file = "onnxruntime-1.16.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:55a7b843a57c8ca0c8ff169428137958146081d5d76f1a6dd444c4ffcd37c3c2"}, + {file = "onnxruntime-1.16.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62c631af1941bf3b5f7d063d24c04aacce8cff0794e157c497e315e89ac5ad7b"}, + {file = "onnxruntime-1.16.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5671f296c3d5c233f601e97a10ab5a1dd8e65ba35c7b7b0c253332aba9dff330"}, + {file = "onnxruntime-1.16.1-cp38-cp38-win32.whl", hash = "sha256:eb3802305023dd05e16848d4e22b41f8147247894309c0c27122aaa08793b3d2"}, + {file = "onnxruntime-1.16.1-cp38-cp38-win_amd64.whl", hash = "sha256:fecfb07443d09d271b1487f401fbdf1ba0c829af6fd4fe8f6af25f71190e7eb9"}, + {file = "onnxruntime-1.16.1-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:de3e12094234db6545c67adbf801874b4eb91e9f299bda34c62967ef0050960f"}, + {file = "onnxruntime-1.16.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ff723c2a5621b5e7103f3be84d5aae1e03a20621e72219dddceae81f65f240af"}, + {file = "onnxruntime-1.16.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:14a7fb3073aaf6b462e3d7fb433320f7700558a8892e5021780522dc4574292a"}, + {file = "onnxruntime-1.16.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:963159f1f699b0454cd72fcef3276c8a1aab9389a7b301bcd8e320fb9d9e8597"}, + {file = 
"onnxruntime-1.16.1-cp39-cp39-win32.whl", hash = "sha256:85771adb75190db9364b25ddec353ebf07635b83eb94b64ed014f1f6d57a3857"}, + {file = "onnxruntime-1.16.1-cp39-cp39-win_amd64.whl", hash = "sha256:d32d2b30799c1f950123c60ae8390818381fd5f88bdf3627eeca10071c155dc5"}, +] + +[package.dependencies] +coloredlogs = "*" +flatbuffers = "*" +numpy = ">=1.21.6" +packaging = "*" +protobuf = "*" +sympy = "*" + +[[package]] +name = "openai" +version = "0.28.1" +description = "Python client library for the OpenAI API" +optional = false +python-versions = ">=3.7.1" +groups = ["main"] +files = [ + {file = "openai-0.28.1-py3-none-any.whl", hash = "sha256:d18690f9e3d31eedb66b57b88c2165d760b24ea0a01f150dd3f068155088ce68"}, + {file = "openai-0.28.1.tar.gz", hash = "sha256:4be1dad329a65b4ce1a660fe6d5431b438f429b5855c883435f0f7fcb6d2dcc8"}, +] + +[package.dependencies] +aiohttp = "*" +requests = ">=2.20" +tqdm = "*" + +[package.extras] +datalib = ["numpy", "openpyxl (>=3.0.7)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"] +dev = ["black (>=21.6b0,<22.0)", "pytest (==6.*)", "pytest-asyncio", "pytest-mock"] +embeddings = ["matplotlib", "numpy", "openpyxl (>=3.0.7)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)", "plotly", "scikit-learn (>=1.0.2)", "scipy", "tenacity (>=8.0.1)"] +wandb = ["numpy", "openpyxl (>=3.0.7)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)", "wandb"] + +[[package]] +name = "opentelemetry-api" +version = "1.27.0" +description = "OpenTelemetry Python API" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "opentelemetry_api-1.27.0-py3-none-any.whl", hash = "sha256:953d5871815e7c30c81b56d910c707588000fff7a3ca1c73e6531911d53065e7"}, + {file = "opentelemetry_api-1.27.0.tar.gz", hash = "sha256:ed673583eaa5f81b5ce5e86ef7cdaf622f88ef65f0b9aab40b843dcae5bef342"}, +] + +[package.dependencies] +deprecated = ">=1.2.6" +importlib-metadata = ">=6.0,<=8.4.0" + +[[package]] +name = "opentelemetry-exporter-otlp-proto-common" +version = "1.27.0" +description = "OpenTelemetry Protobuf encoding" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "opentelemetry_exporter_otlp_proto_common-1.27.0-py3-none-any.whl", hash = "sha256:675db7fffcb60946f3a5c43e17d1168a3307a94a930ecf8d2ea1f286f3d4f79a"}, + {file = "opentelemetry_exporter_otlp_proto_common-1.27.0.tar.gz", hash = "sha256:159d27cf49f359e3798c4c3eb8da6ef4020e292571bd8c5604a2a573231dd5c8"}, +] + +[package.dependencies] +opentelemetry-proto = "1.27.0" + +[[package]] +name = "opentelemetry-exporter-otlp-proto-grpc" +version = "1.27.0" +description = "OpenTelemetry Collector Protobuf over gRPC Exporter" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "opentelemetry_exporter_otlp_proto_grpc-1.27.0-py3-none-any.whl", hash = "sha256:56b5bbd5d61aab05e300d9d62a6b3c134827bbd28d0b12f2649c2da368006c9e"}, + {file = "opentelemetry_exporter_otlp_proto_grpc-1.27.0.tar.gz", hash = "sha256:af6f72f76bcf425dfb5ad11c1a6d6eca2863b91e63575f89bb7b4b55099d968f"}, +] + +[package.dependencies] +deprecated = ">=1.2.6" +googleapis-common-protos = ">=1.52,<2.0" +grpcio = ">=1.0.0,<2.0.0" +opentelemetry-api = ">=1.15,<2.0" +opentelemetry-exporter-otlp-proto-common = "1.27.0" +opentelemetry-proto = "1.27.0" +opentelemetry-sdk = ">=1.27.0,<1.28.0" + +[[package]] +name = "opentelemetry-instrumentation" +version = "0.48b0" +description = "Instrumentation Tools & Auto Instrumentation for OpenTelemetry Python" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + 
{file = "opentelemetry_instrumentation-0.48b0-py3-none-any.whl", hash = "sha256:a69750dc4ba6a5c3eb67986a337185a25b739966d80479befe37b546fc870b44"}, + {file = "opentelemetry_instrumentation-0.48b0.tar.gz", hash = "sha256:94929685d906380743a71c3970f76b5f07476eea1834abd5dd9d17abfe23cc35"}, +] + +[package.dependencies] +opentelemetry-api = ">=1.4,<2.0" +setuptools = ">=16.0" +wrapt = ">=1.0.0,<2.0.0" + +[[package]] +name = "opentelemetry-instrumentation-asgi" +version = "0.48b0" +description = "ASGI instrumentation for OpenTelemetry" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "opentelemetry_instrumentation_asgi-0.48b0-py3-none-any.whl", hash = "sha256:ddb1b5fc800ae66e85a4e2eca4d9ecd66367a8c7b556169d9e7b57e10676e44d"}, + {file = "opentelemetry_instrumentation_asgi-0.48b0.tar.gz", hash = "sha256:04c32174b23c7fa72ddfe192dad874954968a6a924608079af9952964ecdf785"}, +] + +[package.dependencies] +asgiref = ">=3.0,<4.0" +opentelemetry-api = ">=1.12,<2.0" +opentelemetry-instrumentation = "0.48b0" +opentelemetry-semantic-conventions = "0.48b0" +opentelemetry-util-http = "0.48b0" + +[package.extras] +instruments = ["asgiref (>=3.0,<4.0)"] + +[[package]] +name = "opentelemetry-instrumentation-fastapi" +version = "0.48b0" +description = "OpenTelemetry FastAPI Instrumentation" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "opentelemetry_instrumentation_fastapi-0.48b0-py3-none-any.whl", hash = "sha256:afeb820a59e139d3e5d96619600f11ce0187658b8ae9e3480857dd790bc024f2"}, + {file = "opentelemetry_instrumentation_fastapi-0.48b0.tar.gz", hash = "sha256:21a72563ea412c0b535815aeed75fc580240f1f02ebc72381cfab672648637a2"}, +] + +[package.dependencies] +opentelemetry-api = ">=1.12,<2.0" +opentelemetry-instrumentation = "0.48b0" +opentelemetry-instrumentation-asgi = "0.48b0" +opentelemetry-semantic-conventions = "0.48b0" +opentelemetry-util-http = "0.48b0" + +[package.extras] +instruments = ["fastapi (>=0.58,<1.0)"] + +[[package]] +name = "opentelemetry-proto" +version = "1.27.0" +description = "OpenTelemetry Python Proto" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "opentelemetry_proto-1.27.0-py3-none-any.whl", hash = "sha256:b133873de5581a50063e1e4b29cdcf0c5e253a8c2d8dc1229add20a4c3830ace"}, + {file = "opentelemetry_proto-1.27.0.tar.gz", hash = "sha256:33c9345d91dafd8a74fc3d7576c5a38f18b7fdf8d02983ac67485386132aedd6"}, +] + +[package.dependencies] +protobuf = ">=3.19,<5.0" + +[[package]] +name = "opentelemetry-sdk" +version = "1.27.0" +description = "OpenTelemetry Python SDK" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "opentelemetry_sdk-1.27.0-py3-none-any.whl", hash = "sha256:365f5e32f920faf0fd9e14fdfd92c086e317eaa5f860edba9cdc17a380d9197d"}, + {file = "opentelemetry_sdk-1.27.0.tar.gz", hash = "sha256:d525017dea0ccce9ba4e0245100ec46ecdc043f2d7b8315d56b19aff0904fa6f"}, +] + +[package.dependencies] +opentelemetry-api = "1.27.0" +opentelemetry-semantic-conventions = "0.48b0" +typing-extensions = ">=3.7.4" + +[[package]] +name = "opentelemetry-semantic-conventions" +version = "0.48b0" +description = "OpenTelemetry Semantic Conventions" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "opentelemetry_semantic_conventions-0.48b0-py3-none-any.whl", hash = "sha256:a0de9f45c413a8669788a38569c7e0a11ce6ce97861a628cca785deecdc32a1f"}, + {file = "opentelemetry_semantic_conventions-0.48b0.tar.gz", hash = 
"sha256:12d74983783b6878162208be57c9effcb89dc88691c64992d70bb89dc00daa1a"}, +] + +[package.dependencies] +deprecated = ">=1.2.6" +opentelemetry-api = "1.27.0" + +[[package]] +name = "opentelemetry-util-http" +version = "0.48b0" +description = "Web util for OpenTelemetry" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "opentelemetry_util_http-0.48b0-py3-none-any.whl", hash = "sha256:76f598af93aab50328d2a69c786beaedc8b6a7770f7a818cc307eb353debfffb"}, + {file = "opentelemetry_util_http-0.48b0.tar.gz", hash = "sha256:60312015153580cc20f322e5cdc3d3ecad80a71743235bdb77716e742814623c"}, +] + +[[package]] +name = "orjson" +version = "3.10.15" +description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy" +optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "python_version < \"3.10\"" +files = [ + {file = "orjson-3.10.15-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:552c883d03ad185f720d0c09583ebde257e41b9521b74ff40e08b7dec4559c04"}, + {file = "orjson-3.10.15-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:616e3e8d438d02e4854f70bfdc03a6bcdb697358dbaa6bcd19cbe24d24ece1f8"}, + {file = "orjson-3.10.15-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7c2c79fa308e6edb0ffab0a31fd75a7841bf2a79a20ef08a3c6e3b26814c8ca8"}, + {file = "orjson-3.10.15-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:73cb85490aa6bf98abd20607ab5c8324c0acb48d6da7863a51be48505646c814"}, + {file = "orjson-3.10.15-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:763dadac05e4e9d2bc14938a45a2d0560549561287d41c465d3c58aec818b164"}, + {file = "orjson-3.10.15-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a330b9b4734f09a623f74a7490db713695e13b67c959713b78369f26b3dee6bf"}, + {file = "orjson-3.10.15-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a61a4622b7ff861f019974f73d8165be1bd9a0855e1cad18ee167acacabeb061"}, + {file = "orjson-3.10.15-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:acd271247691574416b3228db667b84775c497b245fa275c6ab90dc1ffbbd2b3"}, + {file = "orjson-3.10.15-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:e4759b109c37f635aa5c5cc93a1b26927bfde24b254bcc0e1149a9fada253d2d"}, + {file = "orjson-3.10.15-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:9e992fd5cfb8b9f00bfad2fd7a05a4299db2bbe92e6440d9dd2fab27655b3182"}, + {file = "orjson-3.10.15-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f95fb363d79366af56c3f26b71df40b9a583b07bbaaf5b317407c4d58497852e"}, + {file = "orjson-3.10.15-cp310-cp310-win32.whl", hash = "sha256:f9875f5fea7492da8ec2444839dcc439b0ef298978f311103d0b7dfd775898ab"}, + {file = "orjson-3.10.15-cp310-cp310-win_amd64.whl", hash = "sha256:17085a6aa91e1cd70ca8533989a18b5433e15d29c574582f76f821737c8d5806"}, + {file = "orjson-3.10.15-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:c4cc83960ab79a4031f3119cc4b1a1c627a3dc09df125b27c4201dff2af7eaa6"}, + {file = "orjson-3.10.15-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ddbeef2481d895ab8be5185f2432c334d6dec1f5d1933a9c83014d188e102cef"}, + {file = "orjson-3.10.15-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9e590a0477b23ecd5b0ac865b1b907b01b3c5535f5e8a8f6ab0e503efb896334"}, + {file = 
"orjson-3.10.15-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a6be38bd103d2fd9bdfa31c2720b23b5d47c6796bcb1d1b598e3924441b4298d"}, + {file = "orjson-3.10.15-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ff4f6edb1578960ed628a3b998fa54d78d9bb3e2eb2cfc5c2a09732431c678d0"}, + {file = "orjson-3.10.15-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b0482b21d0462eddd67e7fce10b89e0b6ac56570424662b685a0d6fccf581e13"}, + {file = "orjson-3.10.15-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bb5cc3527036ae3d98b65e37b7986a918955f85332c1ee07f9d3f82f3a6899b5"}, + {file = "orjson-3.10.15-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d569c1c462912acdd119ccbf719cf7102ea2c67dd03b99edcb1a3048651ac96b"}, + {file = "orjson-3.10.15-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:1e6d33efab6b71d67f22bf2962895d3dc6f82a6273a965fab762e64fa90dc399"}, + {file = "orjson-3.10.15-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:c33be3795e299f565681d69852ac8c1bc5c84863c0b0030b2b3468843be90388"}, + {file = "orjson-3.10.15-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:eea80037b9fae5339b214f59308ef0589fc06dc870578b7cce6d71eb2096764c"}, + {file = "orjson-3.10.15-cp311-cp311-win32.whl", hash = "sha256:d5ac11b659fd798228a7adba3e37c010e0152b78b1982897020a8e019a94882e"}, + {file = "orjson-3.10.15-cp311-cp311-win_amd64.whl", hash = "sha256:cf45e0214c593660339ef63e875f32ddd5aa3b4adc15e662cdb80dc49e194f8e"}, + {file = "orjson-3.10.15-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:9d11c0714fc85bfcf36ada1179400862da3288fc785c30e8297844c867d7505a"}, + {file = "orjson-3.10.15-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dba5a1e85d554e3897fa9fe6fbcff2ed32d55008973ec9a2b992bd9a65d2352d"}, + {file = "orjson-3.10.15-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7723ad949a0ea502df656948ddd8b392780a5beaa4c3b5f97e525191b102fff0"}, + {file = "orjson-3.10.15-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6fd9bc64421e9fe9bd88039e7ce8e58d4fead67ca88e3a4014b143cec7684fd4"}, + {file = "orjson-3.10.15-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dadba0e7b6594216c214ef7894c4bd5f08d7c0135f4dd0145600be4fbcc16767"}, + {file = "orjson-3.10.15-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b48f59114fe318f33bbaee8ebeda696d8ccc94c9e90bc27dbe72153094e26f41"}, + {file = "orjson-3.10.15-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:035fb83585e0f15e076759b6fedaf0abb460d1765b6a36f48018a52858443514"}, + {file = "orjson-3.10.15-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d13b7fe322d75bf84464b075eafd8e7dd9eae05649aa2a5354cfa32f43c59f17"}, + {file = "orjson-3.10.15-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:7066b74f9f259849629e0d04db6609db4cf5b973248f455ba5d3bd58a4daaa5b"}, + {file = "orjson-3.10.15-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:88dc3f65a026bd3175eb157fea994fca6ac7c4c8579fc5a86fc2114ad05705b7"}, + {file = "orjson-3.10.15-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b342567e5465bd99faa559507fe45e33fc76b9fb868a63f1642c6bc0735ad02a"}, + {file = "orjson-3.10.15-cp312-cp312-win32.whl", hash = "sha256:0a4f27ea5617828e6b58922fdbec67b0aa4bb844e2d363b9244c47fa2180e665"}, + {file = "orjson-3.10.15-cp312-cp312-win_amd64.whl", hash = 
"sha256:ef5b87e7aa9545ddadd2309efe6824bd3dd64ac101c15dae0f2f597911d46eaa"}, + {file = "orjson-3.10.15-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:bae0e6ec2b7ba6895198cd981b7cca95d1487d0147c8ed751e5632ad16f031a6"}, + {file = "orjson-3.10.15-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f93ce145b2db1252dd86af37d4165b6faa83072b46e3995ecc95d4b2301b725a"}, + {file = "orjson-3.10.15-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7c203f6f969210128af3acae0ef9ea6aab9782939f45f6fe02d05958fe761ef9"}, + {file = "orjson-3.10.15-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8918719572d662e18b8af66aef699d8c21072e54b6c82a3f8f6404c1f5ccd5e0"}, + {file = "orjson-3.10.15-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f71eae9651465dff70aa80db92586ad5b92df46a9373ee55252109bb6b703307"}, + {file = "orjson-3.10.15-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e117eb299a35f2634e25ed120c37c641398826c2f5a3d3cc39f5993b96171b9e"}, + {file = "orjson-3.10.15-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:13242f12d295e83c2955756a574ddd6741c81e5b99f2bef8ed8d53e47a01e4b7"}, + {file = "orjson-3.10.15-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:7946922ada8f3e0b7b958cc3eb22cfcf6c0df83d1fe5521b4a100103e3fa84c8"}, + {file = "orjson-3.10.15-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:b7155eb1623347f0f22c38c9abdd738b287e39b9982e1da227503387b81b34ca"}, + {file = "orjson-3.10.15-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:208beedfa807c922da4e81061dafa9c8489c6328934ca2a562efa707e049e561"}, + {file = "orjson-3.10.15-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:eca81f83b1b8c07449e1d6ff7074e82e3fd6777e588f1a6632127f286a968825"}, + {file = "orjson-3.10.15-cp313-cp313-win32.whl", hash = "sha256:c03cd6eea1bd3b949d0d007c8d57049aa2b39bd49f58b4b2af571a5d3833d890"}, + {file = "orjson-3.10.15-cp313-cp313-win_amd64.whl", hash = "sha256:fd56a26a04f6ba5fb2045b0acc487a63162a958ed837648c5781e1fe3316cfbf"}, + {file = "orjson-3.10.15-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:5e8afd6200e12771467a1a44e5ad780614b86abb4b11862ec54861a82d677746"}, + {file = "orjson-3.10.15-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da9a18c500f19273e9e104cca8c1f0b40a6470bcccfc33afcc088045d0bf5ea6"}, + {file = "orjson-3.10.15-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bb00b7bfbdf5d34a13180e4805d76b4567025da19a197645ca746fc2fb536586"}, + {file = "orjson-3.10.15-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:33aedc3d903378e257047fee506f11e0833146ca3e57a1a1fb0ddb789876c1e1"}, + {file = "orjson-3.10.15-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd0099ae6aed5eb1fc84c9eb72b95505a3df4267e6962eb93cdd5af03be71c98"}, + {file = "orjson-3.10.15-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7c864a80a2d467d7786274fce0e4f93ef2a7ca4ff31f7fc5634225aaa4e9e98c"}, + {file = "orjson-3.10.15-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c25774c9e88a3e0013d7d1a6c8056926b607a61edd423b50eb5c88fd7f2823ae"}, + {file = "orjson-3.10.15-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:e78c211d0074e783d824ce7bb85bf459f93a233eb67a5b5003498232ddfb0e8a"}, + {file = "orjson-3.10.15-cp38-cp38-musllinux_1_2_armv7l.whl", hash = 
"sha256:43e17289ffdbbac8f39243916c893d2ae41a2ea1a9cbb060a56a4d75286351ae"}, + {file = "orjson-3.10.15-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:781d54657063f361e89714293c095f506c533582ee40a426cb6489c48a637b81"}, + {file = "orjson-3.10.15-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:6875210307d36c94873f553786a808af2788e362bd0cf4c8e66d976791e7b528"}, + {file = "orjson-3.10.15-cp38-cp38-win32.whl", hash = "sha256:305b38b2b8f8083cc3d618927d7f424349afce5975b316d33075ef0f73576b60"}, + {file = "orjson-3.10.15-cp38-cp38-win_amd64.whl", hash = "sha256:5dd9ef1639878cc3efffed349543cbf9372bdbd79f478615a1c633fe4e4180d1"}, + {file = "orjson-3.10.15-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:ffe19f3e8d68111e8644d4f4e267a069ca427926855582ff01fc012496d19969"}, + {file = "orjson-3.10.15-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d433bf32a363823863a96561a555227c18a522a8217a6f9400f00ddc70139ae2"}, + {file = "orjson-3.10.15-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:da03392674f59a95d03fa5fb9fe3a160b0511ad84b7a3914699ea5a1b3a38da2"}, + {file = "orjson-3.10.15-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3a63bb41559b05360ded9132032239e47983a39b151af1201f07ec9370715c82"}, + {file = "orjson-3.10.15-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3766ac4702f8f795ff3fa067968e806b4344af257011858cc3d6d8721588b53f"}, + {file = "orjson-3.10.15-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a1c73dcc8fadbd7c55802d9aa093b36878d34a3b3222c41052ce6b0fc65f8e8"}, + {file = "orjson-3.10.15-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b299383825eafe642cbab34be762ccff9fd3408d72726a6b2a4506d410a71ab3"}, + {file = "orjson-3.10.15-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:abc7abecdbf67a173ef1316036ebbf54ce400ef2300b4e26a7b843bd446c2480"}, + {file = "orjson-3.10.15-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:3614ea508d522a621384c1d6639016a5a2e4f027f3e4a1c93a51867615d28829"}, + {file = "orjson-3.10.15-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:295c70f9dc154307777ba30fe29ff15c1bcc9dfc5c48632f37d20a607e9ba85a"}, + {file = "orjson-3.10.15-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:63309e3ff924c62404923c80b9e2048c1f74ba4b615e7584584389ada50ed428"}, + {file = "orjson-3.10.15-cp39-cp39-win32.whl", hash = "sha256:a2f708c62d026fb5340788ba94a55c23df4e1869fec74be455e0b2f5363b8507"}, + {file = "orjson-3.10.15-cp39-cp39-win_amd64.whl", hash = "sha256:efcf6c735c3d22ef60c4aa27a5238f1a477df85e9b15f2142f9d669beb2d13fd"}, + {file = "orjson-3.10.15.tar.gz", hash = "sha256:05ca7fe452a2e9d8d9d706a2984c95b9c2ebc5db417ce0b7a49b91d50642a23e"}, +] + +[[package]] +name = "orjson" +version = "3.10.18" +description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy" +optional = false +python-versions = ">=3.9" +groups = ["main"] +markers = "python_version >= \"3.10\"" +files = [ + {file = "orjson-3.10.18-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:a45e5d68066b408e4bc383b6e4ef05e717c65219a9e1390abc6155a520cac402"}, + {file = "orjson-3.10.18-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:be3b9b143e8b9db05368b13b04c84d37544ec85bb97237b3a923f076265ec89c"}, + {file = "orjson-3.10.18-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:9b0aa09745e2c9b3bf779b096fa71d1cc2d801a604ef6dd79c8b1bfef52b2f92"}, + {file = "orjson-3.10.18-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:53a245c104d2792e65c8d225158f2b8262749ffe64bc7755b00024757d957a13"}, + {file = "orjson-3.10.18-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f9495ab2611b7f8a0a8a505bcb0f0cbdb5469caafe17b0e404c3c746f9900469"}, + {file = "orjson-3.10.18-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:73be1cbcebadeabdbc468f82b087df435843c809cd079a565fb16f0f3b23238f"}, + {file = "orjson-3.10.18-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe8936ee2679e38903df158037a2f1c108129dee218975122e37847fb1d4ac68"}, + {file = "orjson-3.10.18-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:7115fcbc8525c74e4c2b608129bef740198e9a120ae46184dac7683191042056"}, + {file = "orjson-3.10.18-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:771474ad34c66bc4d1c01f645f150048030694ea5b2709b87d3bda273ffe505d"}, + {file = "orjson-3.10.18-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:7c14047dbbea52886dd87169f21939af5d55143dad22d10db6a7514f058156a8"}, + {file = "orjson-3.10.18-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:641481b73baec8db14fdf58f8967e52dc8bda1f2aba3aa5f5c1b07ed6df50b7f"}, + {file = "orjson-3.10.18-cp310-cp310-win32.whl", hash = "sha256:607eb3ae0909d47280c1fc657c4284c34b785bae371d007595633f4b1a2bbe06"}, + {file = "orjson-3.10.18-cp310-cp310-win_amd64.whl", hash = "sha256:8770432524ce0eca50b7efc2a9a5f486ee0113a5fbb4231526d414e6254eba92"}, + {file = "orjson-3.10.18-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:e0a183ac3b8e40471e8d843105da6fbe7c070faab023be3b08188ee3f85719b8"}, + {file = "orjson-3.10.18-cp311-cp311-macosx_15_0_arm64.whl", hash = "sha256:5ef7c164d9174362f85238d0cd4afdeeb89d9e523e4651add6a5d458d6f7d42d"}, + {file = "orjson-3.10.18-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:afd14c5d99cdc7bf93f22b12ec3b294931518aa019e2a147e8aa2f31fd3240f7"}, + {file = "orjson-3.10.18-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7b672502323b6cd133c4af6b79e3bea36bad2d16bca6c1f645903fce83909a7a"}, + {file = "orjson-3.10.18-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:51f8c63be6e070ec894c629186b1c0fe798662b8687f3d9fdfa5e401c6bd7679"}, + {file = "orjson-3.10.18-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3f9478ade5313d724e0495d167083c6f3be0dd2f1c9c8a38db9a9e912cdaf947"}, + {file = "orjson-3.10.18-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:187aefa562300a9d382b4b4eb9694806e5848b0cedf52037bb5c228c61bb66d4"}, + {file = "orjson-3.10.18-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9da552683bc9da222379c7a01779bddd0ad39dd699dd6300abaf43eadee38334"}, + {file = "orjson-3.10.18-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:e450885f7b47a0231979d9c49b567ed1c4e9f69240804621be87c40bc9d3cf17"}, + {file = "orjson-3.10.18-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:5e3c9cc2ba324187cd06287ca24f65528f16dfc80add48dc99fa6c836bb3137e"}, + {file = "orjson-3.10.18-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:50ce016233ac4bfd843ac5471e232b865271d7d9d44cf9d33773bcd883ce442b"}, + {file = "orjson-3.10.18-cp311-cp311-musllinux_1_2_x86_64.whl", hash = 
"sha256:b3ceff74a8f7ffde0b2785ca749fc4e80e4315c0fd887561144059fb1c138aa7"}, + {file = "orjson-3.10.18-cp311-cp311-win32.whl", hash = "sha256:fdba703c722bd868c04702cac4cb8c6b8ff137af2623bc0ddb3b3e6a2c8996c1"}, + {file = "orjson-3.10.18-cp311-cp311-win_amd64.whl", hash = "sha256:c28082933c71ff4bc6ccc82a454a2bffcef6e1d7379756ca567c772e4fb3278a"}, + {file = "orjson-3.10.18-cp311-cp311-win_arm64.whl", hash = "sha256:a6c7c391beaedd3fa63206e5c2b7b554196f14debf1ec9deb54b5d279b1b46f5"}, + {file = "orjson-3.10.18-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:50c15557afb7f6d63bc6d6348e0337a880a04eaa9cd7c9d569bcb4e760a24753"}, + {file = "orjson-3.10.18-cp312-cp312-macosx_15_0_arm64.whl", hash = "sha256:356b076f1662c9813d5fa56db7d63ccceef4c271b1fb3dd522aca291375fcf17"}, + {file = "orjson-3.10.18-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:559eb40a70a7494cd5beab2d73657262a74a2c59aff2068fdba8f0424ec5b39d"}, + {file = "orjson-3.10.18-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f3c29eb9a81e2fbc6fd7ddcfba3e101ba92eaff455b8d602bf7511088bbc0eae"}, + {file = "orjson-3.10.18-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6612787e5b0756a171c7d81ba245ef63a3533a637c335aa7fcb8e665f4a0966f"}, + {file = "orjson-3.10.18-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ac6bd7be0dcab5b702c9d43d25e70eb456dfd2e119d512447468f6405b4a69c"}, + {file = "orjson-3.10.18-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9f72f100cee8dde70100406d5c1abba515a7df926d4ed81e20a9730c062fe9ad"}, + {file = "orjson-3.10.18-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9dca85398d6d093dd41dc0983cbf54ab8e6afd1c547b6b8a311643917fbf4e0c"}, + {file = "orjson-3.10.18-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:22748de2a07fcc8781a70edb887abf801bb6142e6236123ff93d12d92db3d406"}, + {file = "orjson-3.10.18-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:3a83c9954a4107b9acd10291b7f12a6b29e35e8d43a414799906ea10e75438e6"}, + {file = "orjson-3.10.18-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:303565c67a6c7b1f194c94632a4a39918e067bd6176a48bec697393865ce4f06"}, + {file = "orjson-3.10.18-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:86314fdb5053a2f5a5d881f03fca0219bfdf832912aa88d18676a5175c6916b5"}, + {file = "orjson-3.10.18-cp312-cp312-win32.whl", hash = "sha256:187ec33bbec58c76dbd4066340067d9ece6e10067bb0cc074a21ae3300caa84e"}, + {file = "orjson-3.10.18-cp312-cp312-win_amd64.whl", hash = "sha256:f9f94cf6d3f9cd720d641f8399e390e7411487e493962213390d1ae45c7814fc"}, + {file = "orjson-3.10.18-cp312-cp312-win_arm64.whl", hash = "sha256:3d600be83fe4514944500fa8c2a0a77099025ec6482e8087d7659e891f23058a"}, + {file = "orjson-3.10.18-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:69c34b9441b863175cc6a01f2935de994025e773f814412030f269da4f7be147"}, + {file = "orjson-3.10.18-cp313-cp313-macosx_15_0_arm64.whl", hash = "sha256:1ebeda919725f9dbdb269f59bc94f861afbe2a27dce5608cdba2d92772364d1c"}, + {file = "orjson-3.10.18-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5adf5f4eed520a4959d29ea80192fa626ab9a20b2ea13f8f6dc58644f6927103"}, + {file = "orjson-3.10.18-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7592bb48a214e18cd670974f289520f12b7aed1fa0b2e2616b8ed9e069e08595"}, + {file = 
"orjson-3.10.18-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f872bef9f042734110642b7a11937440797ace8c87527de25e0c53558b579ccc"}, + {file = "orjson-3.10.18-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0315317601149c244cb3ecef246ef5861a64824ccbcb8018d32c66a60a84ffbc"}, + {file = "orjson-3.10.18-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e0da26957e77e9e55a6c2ce2e7182a36a6f6b180ab7189315cb0995ec362e049"}, + {file = "orjson-3.10.18-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb70d489bc79b7519e5803e2cc4c72343c9dc1154258adf2f8925d0b60da7c58"}, + {file = "orjson-3.10.18-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e9e86a6af31b92299b00736c89caf63816f70a4001e750bda179e15564d7a034"}, + {file = "orjson-3.10.18-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:c382a5c0b5931a5fc5405053d36c1ce3fd561694738626c77ae0b1dfc0242ca1"}, + {file = "orjson-3.10.18-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:8e4b2ae732431127171b875cb2668f883e1234711d3c147ffd69fe5be51a8012"}, + {file = "orjson-3.10.18-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:2d808e34ddb24fc29a4d4041dcfafbae13e129c93509b847b14432717d94b44f"}, + {file = "orjson-3.10.18-cp313-cp313-win32.whl", hash = "sha256:ad8eacbb5d904d5591f27dee4031e2c1db43d559edb8f91778efd642d70e6bea"}, + {file = "orjson-3.10.18-cp313-cp313-win_amd64.whl", hash = "sha256:aed411bcb68bf62e85588f2a7e03a6082cc42e5a2796e06e72a962d7c6310b52"}, + {file = "orjson-3.10.18-cp313-cp313-win_arm64.whl", hash = "sha256:f54c1385a0e6aba2f15a40d703b858bedad36ded0491e55d35d905b2c34a4cc3"}, + {file = "orjson-3.10.18-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:c95fae14225edfd699454e84f61c3dd938df6629a00c6ce15e704f57b58433bb"}, + {file = "orjson-3.10.18-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5232d85f177f98e0cefabb48b5e7f60cff6f3f0365f9c60631fecd73849b2a82"}, + {file = "orjson-3.10.18-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2783e121cafedf0d85c148c248a20470018b4ffd34494a68e125e7d5857655d1"}, + {file = "orjson-3.10.18-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e54ee3722caf3db09c91f442441e78f916046aa58d16b93af8a91500b7bbf273"}, + {file = "orjson-3.10.18-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2daf7e5379b61380808c24f6fc182b7719301739e4271c3ec88f2984a2d61f89"}, + {file = "orjson-3.10.18-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7f39b371af3add20b25338f4b29a8d6e79a8c7ed0e9dd49e008228a065d07781"}, + {file = "orjson-3.10.18-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2b819ed34c01d88c6bec290e6842966f8e9ff84b7694632e88341363440d4cc0"}, + {file = "orjson-3.10.18-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2f6c57debaef0b1aa13092822cbd3698a1fb0209a9ea013a969f4efa36bdea57"}, + {file = "orjson-3.10.18-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:755b6d61ffdb1ffa1e768330190132e21343757c9aa2308c67257cc81a1a6f5a"}, + {file = "orjson-3.10.18-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:ce8d0a875a85b4c8579eab5ac535fb4b2a50937267482be402627ca7e7570ee3"}, + {file = "orjson-3.10.18-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:57b5d0673cbd26781bebc2bf86f99dd19bd5a9cb55f71cc4f66419f6b50f3d77"}, + {file = "orjson-3.10.18-cp39-cp39-win32.whl", hash = 
"sha256:951775d8b49d1d16ca8818b1f20c4965cae9157e7b562a2ae34d3967b8f21c8e"}, + {file = "orjson-3.10.18-cp39-cp39-win_amd64.whl", hash = "sha256:fdd9d68f83f0bc4406610b1ac68bdcded8c5ee58605cc69e643a06f4d075f429"}, + {file = "orjson-3.10.18.tar.gz", hash = "sha256:e8da3947d92123eda795b68228cafe2724815621fe35e8e320a9e9593a4bcd53"}, +] + +[[package]] +name = "oss2" +version = "2.19.1" +description = "Aliyun OSS (Object Storage Service) SDK" +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "oss2-2.19.1.tar.gz", hash = "sha256:a8ab9ee7eb99e88a7e1382edc6ea641d219d585a7e074e3776e9dec9473e59c1"}, +] + +[package.dependencies] +aliyun-python-sdk-core = ">=2.13.12" +aliyun-python-sdk-kms = ">=2.4.1" +crcmod = ">=1.7" +pycryptodome = ">=3.4.7" +requests = "!=2.9.0" +six = "*" + +[[package]] +name = "overrides" +version = "7.7.0" +description = "A decorator to automatically detect mismatch when overriding a method." +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "overrides-7.7.0-py3-none-any.whl", hash = "sha256:c7ed9d062f78b8e4c1a7b70bd8796b35ead4d9f510227ef9c5dc7626c60d7e49"}, + {file = "overrides-7.7.0.tar.gz", hash = "sha256:55158fa3d93b98cc75299b1e67078ad9003ca27945c76162c1c0766d6f91820a"}, +] + +[[package]] +name = "packaging" +version = "25.0" +description = "Core utilities for Python packages" +optional = false +python-versions = ">=3.8" +groups = ["main", "dev"] +files = [ + {file = "packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484"}, + {file = "packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f"}, +] + +[[package]] +name = "pandas" +version = "2.0.3" +description = "Powerful data structures for data analysis, time series, and statistics" +optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "python_version < \"3.10\"" +files = [ + {file = "pandas-2.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e4c7c9f27a4185304c7caf96dc7d91bc60bc162221152de697c98eb0b2648dd8"}, + {file = "pandas-2.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f167beed68918d62bffb6ec64f2e1d8a7d297a038f86d4aed056b9493fca407f"}, + {file = "pandas-2.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ce0c6f76a0f1ba361551f3e6dceaff06bde7514a374aa43e33b588ec10420183"}, + {file = "pandas-2.0.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba619e410a21d8c387a1ea6e8a0e49bb42216474436245718d7f2e88a2f8d7c0"}, + {file = "pandas-2.0.3-cp310-cp310-win32.whl", hash = "sha256:3ef285093b4fe5058eefd756100a367f27029913760773c8bf1d2d8bebe5d210"}, + {file = "pandas-2.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:9ee1a69328d5c36c98d8e74db06f4ad518a1840e8ccb94a4ba86920986bb617e"}, + {file = "pandas-2.0.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b084b91d8d66ab19f5bb3256cbd5ea661848338301940e17f4492b2ce0801fe8"}, + {file = "pandas-2.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:37673e3bdf1551b95bf5d4ce372b37770f9529743d2498032439371fc7b7eb26"}, + {file = "pandas-2.0.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9cb1e14fdb546396b7e1b923ffaeeac24e4cedd14266c3497216dd4448e4f2d"}, + {file = "pandas-2.0.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d9cd88488cceb7635aebb84809d087468eb33551097d600c6dad13602029c2df"}, + {file = "pandas-2.0.3-cp311-cp311-win32.whl", hash = 
"sha256:694888a81198786f0e164ee3a581df7d505024fbb1f15202fc7db88a71d84ebd"}, + {file = "pandas-2.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:6a21ab5c89dcbd57f78d0ae16630b090eec626360085a4148693def5452d8a6b"}, + {file = "pandas-2.0.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9e4da0d45e7f34c069fe4d522359df7d23badf83abc1d1cef398895822d11061"}, + {file = "pandas-2.0.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:32fca2ee1b0d93dd71d979726b12b61faa06aeb93cf77468776287f41ff8fdc5"}, + {file = "pandas-2.0.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:258d3624b3ae734490e4d63c430256e716f488c4fcb7c8e9bde2d3aa46c29089"}, + {file = "pandas-2.0.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9eae3dc34fa1aa7772dd3fc60270d13ced7346fcbcfee017d3132ec625e23bb0"}, + {file = "pandas-2.0.3-cp38-cp38-win32.whl", hash = "sha256:f3421a7afb1a43f7e38e82e844e2bca9a6d793d66c1a7f9f0ff39a795bbc5e02"}, + {file = "pandas-2.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:69d7f3884c95da3a31ef82b7618af5710dba95bb885ffab339aad925c3e8ce78"}, + {file = "pandas-2.0.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5247fb1ba347c1261cbbf0fcfba4a3121fbb4029d95d9ef4dc45406620b25c8b"}, + {file = "pandas-2.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:81af086f4543c9d8bb128328b5d32e9986e0c84d3ee673a2ac6fb57fd14f755e"}, + {file = "pandas-2.0.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1994c789bf12a7c5098277fb43836ce090f1073858c10f9220998ac74f37c69b"}, + {file = "pandas-2.0.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ec591c48e29226bcbb316e0c1e9423622bc7a4eaf1ef7c3c9fa1a3981f89641"}, + {file = "pandas-2.0.3-cp39-cp39-win32.whl", hash = "sha256:04dbdbaf2e4d46ca8da896e1805bc04eb85caa9a82e259e8eed00254d5e0c682"}, + {file = "pandas-2.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:1168574b036cd8b93abc746171c9b4f1b83467438a5e45909fed645cf8692dbc"}, + {file = "pandas-2.0.3.tar.gz", hash = "sha256:c02f372a88e0d17f36d3093a644c73cfc1788e876a7c4bcb4020a77512e2043c"}, +] + +[package.dependencies] +numpy = {version = ">=1.20.3", markers = "python_version < \"3.10\""} +python-dateutil = ">=2.8.2" +pytz = ">=2020.1" +tzdata = ">=2022.1" + +[package.extras] +all = ["PyQt5 (>=5.15.1)", "SQLAlchemy (>=1.4.16)", "beautifulsoup4 (>=4.9.3)", "bottleneck (>=1.3.2)", "brotlipy (>=0.7.0)", "fastparquet (>=0.6.3)", "fsspec (>=2021.07.0)", "gcsfs (>=2021.07.0)", "html5lib (>=1.1)", "hypothesis (>=6.34.2)", "jinja2 (>=3.0.0)", "lxml (>=4.6.3)", "matplotlib (>=3.6.1)", "numba (>=0.53.1)", "numexpr (>=2.7.3)", "odfpy (>=1.4.1)", "openpyxl (>=3.0.7)", "pandas-gbq (>=0.15.0)", "psycopg2 (>=2.8.6)", "pyarrow (>=7.0.0)", "pymysql (>=1.0.2)", "pyreadstat (>=1.1.2)", "pytest (>=7.3.2)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)", "python-snappy (>=0.6.0)", "pyxlsb (>=1.0.8)", "qtpy (>=2.2.0)", "s3fs (>=2021.08.0)", "scipy (>=1.7.1)", "tables (>=3.6.1)", "tabulate (>=0.8.9)", "xarray (>=0.21.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=1.4.3)", "zstandard (>=0.15.2)"] +aws = ["s3fs (>=2021.08.0)"] +clipboard = ["PyQt5 (>=5.15.1)", "qtpy (>=2.2.0)"] +compression = ["brotlipy (>=0.7.0)", "python-snappy (>=0.6.0)", "zstandard (>=0.15.2)"] +computation = ["scipy (>=1.7.1)", "xarray (>=0.21.0)"] +excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.0.7)", "pyxlsb (>=1.0.8)", "xlrd (>=2.0.1)", "xlsxwriter (>=1.4.3)"] +feather = ["pyarrow (>=7.0.0)"] +fss = ["fsspec (>=2021.07.0)"] +gcp = ["gcsfs (>=2021.07.0)", "pandas-gbq (>=0.15.0)"] +hdf5 = 
["tables (>=3.6.1)"] +html = ["beautifulsoup4 (>=4.9.3)", "html5lib (>=1.1)", "lxml (>=4.6.3)"] +mysql = ["SQLAlchemy (>=1.4.16)", "pymysql (>=1.0.2)"] +output-formatting = ["jinja2 (>=3.0.0)", "tabulate (>=0.8.9)"] +parquet = ["pyarrow (>=7.0.0)"] +performance = ["bottleneck (>=1.3.2)", "numba (>=0.53.1)", "numexpr (>=2.7.1)"] +plot = ["matplotlib (>=3.6.1)"] +postgresql = ["SQLAlchemy (>=1.4.16)", "psycopg2 (>=2.8.6)"] +spss = ["pyreadstat (>=1.1.2)"] +sql-other = ["SQLAlchemy (>=1.4.16)"] +test = ["hypothesis (>=6.34.2)", "pytest (>=7.3.2)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)"] +xml = ["lxml (>=4.6.3)"] + +[[package]] +name = "pandas" +version = "2.1.0" +description = "Powerful data structures for data analysis, time series, and statistics" +optional = false +python-versions = ">=3.9" +groups = ["main"] +markers = "python_version >= \"3.12\"" +files = [ + {file = "pandas-2.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:40dd20439ff94f1b2ed55b393ecee9cb6f3b08104c2c40b0cb7186a2f0046242"}, + {file = "pandas-2.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d4f38e4fedeba580285eaac7ede4f686c6701a9e618d8a857b138a126d067f2f"}, + {file = "pandas-2.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6e6a0fe052cf27ceb29be9429428b4918f3740e37ff185658f40d8702f0b3e09"}, + {file = "pandas-2.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d81e1813191070440d4c7a413cb673052b3b4a984ffd86b8dd468c45742d3cc"}, + {file = "pandas-2.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:eb20252720b1cc1b7d0b2879ffc7e0542dd568f24d7c4b2347cb035206936421"}, + {file = "pandas-2.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:38f74ef7ebc0ffb43b3d633e23d74882bce7e27bfa09607f3c5d3e03ffd9a4a5"}, + {file = "pandas-2.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cda72cc8c4761c8f1d97b169661f23a86b16fdb240bdc341173aee17e4d6cedd"}, + {file = "pandas-2.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d97daeac0db8c993420b10da4f5f5b39b01fc9ca689a17844e07c0a35ac96b4b"}, + {file = "pandas-2.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8c58b1113892e0c8078f006a167cc210a92bdae23322bb4614f2f0b7a4b510f"}, + {file = "pandas-2.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:629124923bcf798965b054a540f9ccdfd60f71361255c81fa1ecd94a904b9dd3"}, + {file = "pandas-2.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:70cf866af3ab346a10debba8ea78077cf3a8cd14bd5e4bed3d41555a3280041c"}, + {file = "pandas-2.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:d53c8c1001f6a192ff1de1efe03b31a423d0eee2e9e855e69d004308e046e694"}, + {file = "pandas-2.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:86f100b3876b8c6d1a2c66207288ead435dc71041ee4aea789e55ef0e06408cb"}, + {file = "pandas-2.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:28f330845ad21c11db51e02d8d69acc9035edfd1116926ff7245c7215db57957"}, + {file = "pandas-2.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9a6ccf0963db88f9b12df6720e55f337447aea217f426a22d71f4213a3099a6"}, + {file = "pandas-2.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d99e678180bc59b0c9443314297bddce4ad35727a1a2656dbe585fd78710b3b9"}, + {file = "pandas-2.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b31da36d376d50a1a492efb18097b9101bdbd8b3fbb3f49006e02d4495d4c644"}, + {file = "pandas-2.1.0-cp39-cp39-win_amd64.whl", hash = 
"sha256:0164b85937707ec7f70b34a6c3a578dbf0f50787f910f21ca3b26a7fd3363437"}, + {file = "pandas-2.1.0.tar.gz", hash = "sha256:62c24c7fc59e42b775ce0679cfa7b14a5f9bfb7643cfbe708c960699e05fb918"}, +] + +[package.dependencies] +numpy = {version = ">=1.23.2", markers = "python_version >= \"3.11\""} +python-dateutil = ">=2.8.2" +pytz = ">=2020.1" +tzdata = ">=2022.1" + +[package.extras] +all = ["PyQt5 (>=5.15.6)", "SQLAlchemy (>=1.4.36)", "beautifulsoup4 (>=4.11.1)", "bottleneck (>=1.3.4)", "dataframe-api-compat (>=0.1.7)", "fastparquet (>=0.8.1)", "fsspec (>=2022.05.0)", "gcsfs (>=2022.05.0)", "html5lib (>=1.1)", "hypothesis (>=6.46.1)", "jinja2 (>=3.1.2)", "lxml (>=4.8.0)", "matplotlib (>=3.6.1)", "numba (>=0.55.2)", "numexpr (>=2.8.0)", "odfpy (>=1.4.1)", "openpyxl (>=3.0.10)", "pandas-gbq (>=0.17.5)", "psycopg2 (>=2.9.3)", "pyarrow (>=7.0.0)", "pymysql (>=1.0.2)", "pyreadstat (>=1.1.5)", "pytest (>=7.3.2)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)", "pyxlsb (>=1.0.9)", "qtpy (>=2.2.0)", "s3fs (>=2022.05.0)", "scipy (>=1.8.1)", "tables (>=3.7.0)", "tabulate (>=0.8.10)", "xarray (>=2022.03.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.3)", "zstandard (>=0.17.0)"] +aws = ["s3fs (>=2022.05.0)"] +clipboard = ["PyQt5 (>=5.15.6)", "qtpy (>=2.2.0)"] +compression = ["zstandard (>=0.17.0)"] +computation = ["scipy (>=1.8.1)", "xarray (>=2022.03.0)"] +consortium-standard = ["dataframe-api-compat (>=0.1.7)"] +excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.0.10)", "pyxlsb (>=1.0.9)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.3)"] +feather = ["pyarrow (>=7.0.0)"] +fss = ["fsspec (>=2022.05.0)"] +gcp = ["gcsfs (>=2022.05.0)", "pandas-gbq (>=0.17.5)"] +hdf5 = ["tables (>=3.7.0)"] +html = ["beautifulsoup4 (>=4.11.1)", "html5lib (>=1.1)", "lxml (>=4.8.0)"] +mysql = ["SQLAlchemy (>=1.4.36)", "pymysql (>=1.0.2)"] +output-formatting = ["jinja2 (>=3.1.2)", "tabulate (>=0.8.10)"] +parquet = ["pyarrow (>=7.0.0)"] +performance = ["bottleneck (>=1.3.4)", "numba (>=0.55.2)", "numexpr (>=2.8.0)"] +plot = ["matplotlib (>=3.6.1)"] +postgresql = ["SQLAlchemy (>=1.4.36)", "psycopg2 (>=2.9.3)"] +spss = ["pyreadstat (>=1.1.5)"] +sql-other = ["SQLAlchemy (>=1.4.36)"] +test = ["hypothesis (>=6.46.1)", "pytest (>=7.3.2)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)"] +xml = ["lxml (>=4.8.0)"] + +[[package]] +name = "pandas" +version = "2.3.0" +description = "Powerful data structures for data analysis, time series, and statistics" +optional = false +python-versions = ">=3.9" +groups = ["main"] +markers = "python_version >= \"3.10\" and python_version < \"3.12\"" +files = [ + {file = "pandas-2.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:625466edd01d43b75b1883a64d859168e4556261a5035b32f9d743b67ef44634"}, + {file = "pandas-2.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a6872d695c896f00df46b71648eea332279ef4077a409e2fe94220208b6bb675"}, + {file = "pandas-2.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f4dd97c19bd06bc557ad787a15b6489d2614ddaab5d104a0310eb314c724b2d2"}, + {file = "pandas-2.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:034abd6f3db8b9880aaee98f4f5d4dbec7c4829938463ec046517220b2f8574e"}, + {file = "pandas-2.3.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:23c2b2dc5213810208ca0b80b8666670eb4660bbfd9d45f58592cc4ddcfd62e1"}, + {file = "pandas-2.3.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:39ff73ec07be5e90330cc6ff5705c651ace83374189dcdcb46e6ff54b4a72cd6"}, + {file = "pandas-2.3.0-cp310-cp310-win_amd64.whl", 
hash = "sha256:40cecc4ea5abd2921682b57532baea5588cc5f80f0231c624056b146887274d2"}, + {file = "pandas-2.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:8adff9f138fc614347ff33812046787f7d43b3cef7c0f0171b3340cae333f6ca"}, + {file = "pandas-2.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e5f08eb9a445d07720776df6e641975665c9ea12c9d8a331e0f6890f2dcd76ef"}, + {file = "pandas-2.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fa35c266c8cd1a67d75971a1912b185b492d257092bdd2709bbdebe574ed228d"}, + {file = "pandas-2.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14a0cc77b0f089d2d2ffe3007db58f170dae9b9f54e569b299db871a3ab5bf46"}, + {file = "pandas-2.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c06f6f144ad0a1bf84699aeea7eff6068ca5c63ceb404798198af7eb86082e33"}, + {file = "pandas-2.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ed16339bc354a73e0a609df36d256672c7d296f3f767ac07257801aa064ff73c"}, + {file = "pandas-2.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:fa07e138b3f6c04addfeaf56cc7fdb96c3b68a3fe5e5401251f231fce40a0d7a"}, + {file = "pandas-2.3.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:2eb4728a18dcd2908c7fccf74a982e241b467d178724545a48d0caf534b38ebf"}, + {file = "pandas-2.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b9d8c3187be7479ea5c3d30c32a5d73d62a621166675063b2edd21bc47614027"}, + {file = "pandas-2.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9ff730713d4c4f2f1c860e36c005c7cefc1c7c80c21c0688fd605aa43c9fcf09"}, + {file = "pandas-2.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba24af48643b12ffe49b27065d3babd52702d95ab70f50e1b34f71ca703e2c0d"}, + {file = "pandas-2.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:404d681c698e3c8a40a61d0cd9412cc7364ab9a9cc6e144ae2992e11a2e77a20"}, + {file = "pandas-2.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:6021910b086b3ca756755e86ddc64e0ddafd5e58e076c72cb1585162e5ad259b"}, + {file = "pandas-2.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:094e271a15b579650ebf4c5155c05dcd2a14fd4fdd72cf4854b2f7ad31ea30be"}, + {file = "pandas-2.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:2c7e2fc25f89a49a11599ec1e76821322439d90820108309bf42130d2f36c983"}, + {file = "pandas-2.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:c6da97aeb6a6d233fb6b17986234cc723b396b50a3c6804776351994f2a658fd"}, + {file = "pandas-2.3.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb32dc743b52467d488e7a7c8039b821da2826a9ba4f85b89ea95274f863280f"}, + {file = "pandas-2.3.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:213cd63c43263dbb522c1f8a7c9d072e25900f6975596f883f4bebd77295d4f3"}, + {file = "pandas-2.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:1d2b33e68d0ce64e26a4acc2e72d747292084f4e8db4c847c6f5f6cbe56ed6d8"}, + {file = "pandas-2.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:430a63bae10b5086995db1b02694996336e5a8ac9a96b4200572b413dfdfccb9"}, + {file = "pandas-2.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:4930255e28ff5545e2ca404637bcc56f031893142773b3468dc021c6c32a1390"}, + {file = "pandas-2.3.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:f925f1ef673b4bd0271b1809b72b3270384f2b7d9d14a189b12b7fc02574d575"}, + {file = "pandas-2.3.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:e78ad363ddb873a631e92a3c063ade1ecfb34cae71e9a2be6ad100f875ac1042"}, + {file = 
"pandas-2.3.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:951805d146922aed8357e4cc5671b8b0b9be1027f0619cea132a9f3f65f2f09c"}, + {file = "pandas-2.3.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a881bc1309f3fce34696d07b00f13335c41f5f5a8770a33b09ebe23261cfc67"}, + {file = "pandas-2.3.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:e1991bbb96f4050b09b5f811253c4f3cf05ee89a589379aa36cd623f21a31d6f"}, + {file = "pandas-2.3.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:bb3be958022198531eb7ec2008cfc78c5b1eed51af8600c6c5d9160d89d8d249"}, + {file = "pandas-2.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9efc0acbbffb5236fbdf0409c04edce96bec4bdaa649d49985427bd1ec73e085"}, + {file = "pandas-2.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:75651c14fde635e680496148a8526b328e09fe0572d9ae9b638648c46a544ba3"}, + {file = "pandas-2.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf5be867a0541a9fb47a4be0c5790a4bccd5b77b92f0a59eeec9375fafc2aa14"}, + {file = "pandas-2.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:84141f722d45d0c2a89544dd29d35b3abfc13d2250ed7e68394eda7564bd6324"}, + {file = "pandas-2.3.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:f95a2aef32614ed86216d3c450ab12a4e82084e8102e355707a1d96e33d51c34"}, + {file = "pandas-2.3.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:e0f51973ba93a9f97185049326d75b942b9aeb472bec616a129806facb129ebb"}, + {file = "pandas-2.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:b198687ca9c8529662213538a9bb1e60fa0bf0f6af89292eb68fea28743fcd5a"}, + {file = "pandas-2.3.0.tar.gz", hash = "sha256:34600ab34ebf1131a7613a260a61dbe8b62c188ec0ea4c296da7c9a06b004133"}, +] + +[package.dependencies] +numpy = [ + {version = ">=1.22.4", markers = "python_version < \"3.11\""}, + {version = ">=1.23.2", markers = "python_version == \"3.11\""}, +] +python-dateutil = ">=2.8.2" +pytz = ">=2020.1" +tzdata = ">=2022.7" + +[package.extras] +all = ["PyQt5 (>=5.15.9)", "SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-driver-sqlite (>=0.8.0)", "beautifulsoup4 (>=4.11.2)", "bottleneck (>=1.3.6)", "dataframe-api-compat (>=0.1.7)", "fastparquet (>=2022.12.0)", "fsspec (>=2022.11.0)", "gcsfs (>=2022.11.0)", "html5lib (>=1.1)", "hypothesis (>=6.46.1)", "jinja2 (>=3.1.2)", "lxml (>=4.9.2)", "matplotlib (>=3.6.3)", "numba (>=0.56.4)", "numexpr (>=2.8.4)", "odfpy (>=1.4.1)", "openpyxl (>=3.1.0)", "pandas-gbq (>=0.19.0)", "psycopg2 (>=2.9.6)", "pyarrow (>=10.0.1)", "pymysql (>=1.0.2)", "pyreadstat (>=1.2.0)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)", "python-calamine (>=0.1.7)", "pyxlsb (>=1.0.10)", "qtpy (>=2.3.0)", "s3fs (>=2022.11.0)", "scipy (>=1.10.0)", "tables (>=3.8.0)", "tabulate (>=0.9.0)", "xarray (>=2022.12.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.5)", "zstandard (>=0.19.0)"] +aws = ["s3fs (>=2022.11.0)"] +clipboard = ["PyQt5 (>=5.15.9)", "qtpy (>=2.3.0)"] +compression = ["zstandard (>=0.19.0)"] +computation = ["scipy (>=1.10.0)", "xarray (>=2022.12.0)"] +consortium-standard = ["dataframe-api-compat (>=0.1.7)"] +excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.1.0)", "python-calamine (>=0.1.7)", "pyxlsb (>=1.0.10)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.5)"] +feather = ["pyarrow (>=10.0.1)"] +fss = ["fsspec (>=2022.11.0)"] +gcp = ["gcsfs (>=2022.11.0)", "pandas-gbq (>=0.19.0)"] +hdf5 = ["tables (>=3.8.0)"] +html = ["beautifulsoup4 (>=4.11.2)", "html5lib (>=1.1)", "lxml (>=4.9.2)"] +mysql = ["SQLAlchemy 
(>=2.0.0)", "pymysql (>=1.0.2)"] +output-formatting = ["jinja2 (>=3.1.2)", "tabulate (>=0.9.0)"] +parquet = ["pyarrow (>=10.0.1)"] +performance = ["bottleneck (>=1.3.6)", "numba (>=0.56.4)", "numexpr (>=2.8.4)"] +plot = ["matplotlib (>=3.6.3)"] +postgresql = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "psycopg2 (>=2.9.6)"] +pyarrow = ["pyarrow (>=10.0.1)"] +spss = ["pyreadstat (>=1.2.0)"] +sql-other = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-driver-sqlite (>=0.8.0)"] +test = ["hypothesis (>=6.46.1)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)"] +xml = ["lxml (>=4.9.2)"] + +[[package]] +name = "pillow" +version = "10.4.0" +description = "Python Imaging Library (Fork)" +optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "python_version < \"3.10\"" +files = [ + {file = "pillow-10.4.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:4d9667937cfa347525b319ae34375c37b9ee6b525440f3ef48542fcf66f2731e"}, + {file = "pillow-10.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:543f3dc61c18dafb755773efc89aae60d06b6596a63914107f75459cf984164d"}, + {file = "pillow-10.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7928ecbf1ece13956b95d9cbcfc77137652b02763ba384d9ab508099a2eca856"}, + {file = "pillow-10.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4d49b85c4348ea0b31ea63bc75a9f3857869174e2bf17e7aba02945cd218e6f"}, + {file = "pillow-10.4.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:6c762a5b0997f5659a5ef2266abc1d8851ad7749ad9a6a5506eb23d314e4f46b"}, + {file = "pillow-10.4.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:a985e028fc183bf12a77a8bbf36318db4238a3ded7fa9df1b9a133f1cb79f8fc"}, + {file = "pillow-10.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:812f7342b0eee081eaec84d91423d1b4650bb9828eb53d8511bcef8ce5aecf1e"}, + {file = "pillow-10.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ac1452d2fbe4978c2eec89fb5a23b8387aba707ac72810d9490118817d9c0b46"}, + {file = "pillow-10.4.0-cp310-cp310-win32.whl", hash = "sha256:bcd5e41a859bf2e84fdc42f4edb7d9aba0a13d29a2abadccafad99de3feff984"}, + {file = "pillow-10.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:ecd85a8d3e79cd7158dec1c9e5808e821feea088e2f69a974db5edf84dc53141"}, + {file = "pillow-10.4.0-cp310-cp310-win_arm64.whl", hash = "sha256:ff337c552345e95702c5fde3158acb0625111017d0e5f24bf3acdb9cc16b90d1"}, + {file = "pillow-10.4.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:0a9ec697746f268507404647e531e92889890a087e03681a3606d9b920fbee3c"}, + {file = "pillow-10.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dfe91cb65544a1321e631e696759491ae04a2ea11d36715eca01ce07284738be"}, + {file = "pillow-10.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5dc6761a6efc781e6a1544206f22c80c3af4c8cf461206d46a1e6006e4429ff3"}, + {file = "pillow-10.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e84b6cc6a4a3d76c153a6b19270b3526a5a8ed6b09501d3af891daa2a9de7d6"}, + {file = "pillow-10.4.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:bbc527b519bd3aa9d7f429d152fea69f9ad37c95f0b02aebddff592688998abe"}, + {file = "pillow-10.4.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:76a911dfe51a36041f2e756b00f96ed84677cdeb75d25c767f296c1c1eda1319"}, + {file = "pillow-10.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:59291fb29317122398786c2d44427bbd1a6d7ff54017075b22be9d21aa59bd8d"}, + {file = 
"pillow-10.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:416d3a5d0e8cfe4f27f574362435bc9bae57f679a7158e0096ad2beb427b8696"}, + {file = "pillow-10.4.0-cp311-cp311-win32.whl", hash = "sha256:7086cc1d5eebb91ad24ded9f58bec6c688e9f0ed7eb3dbbf1e4800280a896496"}, + {file = "pillow-10.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cbed61494057c0f83b83eb3a310f0bf774b09513307c434d4366ed64f4128a91"}, + {file = "pillow-10.4.0-cp311-cp311-win_arm64.whl", hash = "sha256:f5f0c3e969c8f12dd2bb7e0b15d5c468b51e5017e01e2e867335c81903046a22"}, + {file = "pillow-10.4.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:673655af3eadf4df6b5457033f086e90299fdd7a47983a13827acf7459c15d94"}, + {file = "pillow-10.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:866b6942a92f56300012f5fbac71f2d610312ee65e22f1aa2609e491284e5597"}, + {file = "pillow-10.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:29dbdc4207642ea6aad70fbde1a9338753d33fb23ed6956e706936706f52dd80"}, + {file = "pillow-10.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf2342ac639c4cf38799a44950bbc2dfcb685f052b9e262f446482afaf4bffca"}, + {file = "pillow-10.4.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:f5b92f4d70791b4a67157321c4e8225d60b119c5cc9aee8ecf153aace4aad4ef"}, + {file = "pillow-10.4.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:86dcb5a1eb778d8b25659d5e4341269e8590ad6b4e8b44d9f4b07f8d136c414a"}, + {file = "pillow-10.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:780c072c2e11c9b2c7ca37f9a2ee8ba66f44367ac3e5c7832afcfe5104fd6d1b"}, + {file = "pillow-10.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:37fb69d905be665f68f28a8bba3c6d3223c8efe1edf14cc4cfa06c241f8c81d9"}, + {file = "pillow-10.4.0-cp312-cp312-win32.whl", hash = "sha256:7dfecdbad5c301d7b5bde160150b4db4c659cee2b69589705b6f8a0c509d9f42"}, + {file = "pillow-10.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:1d846aea995ad352d4bdcc847535bd56e0fd88d36829d2c90be880ef1ee4668a"}, + {file = "pillow-10.4.0-cp312-cp312-win_arm64.whl", hash = "sha256:e553cad5179a66ba15bb18b353a19020e73a7921296a7979c4a2b7f6a5cd57f9"}, + {file = "pillow-10.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8bc1a764ed8c957a2e9cacf97c8b2b053b70307cf2996aafd70e91a082e70df3"}, + {file = "pillow-10.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:6209bb41dc692ddfee4942517c19ee81b86c864b626dbfca272ec0f7cff5d9fb"}, + {file = "pillow-10.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bee197b30783295d2eb680b311af15a20a8b24024a19c3a26431ff83eb8d1f70"}, + {file = "pillow-10.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ef61f5dd14c300786318482456481463b9d6b91ebe5ef12f405afbba77ed0be"}, + {file = "pillow-10.4.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:297e388da6e248c98bc4a02e018966af0c5f92dfacf5a5ca22fa01cb3179bca0"}, + {file = "pillow-10.4.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:e4db64794ccdf6cb83a59d73405f63adbe2a1887012e308828596100a0b2f6cc"}, + {file = "pillow-10.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bd2880a07482090a3bcb01f4265f1936a903d70bc740bfcb1fd4e8a2ffe5cf5a"}, + {file = "pillow-10.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4b35b21b819ac1dbd1233317adeecd63495f6babf21b7b2512d244ff6c6ce309"}, + {file = "pillow-10.4.0-cp313-cp313-win32.whl", hash = "sha256:551d3fd6e9dc15e4c1eb6fc4ba2b39c0c7933fa113b220057a34f4bb3268a060"}, + {file = 
"pillow-10.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:030abdbe43ee02e0de642aee345efa443740aa4d828bfe8e2eb11922ea6a21ea"}, + {file = "pillow-10.4.0-cp313-cp313-win_arm64.whl", hash = "sha256:5b001114dd152cfd6b23befeb28d7aee43553e2402c9f159807bf55f33af8a8d"}, + {file = "pillow-10.4.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:8d4d5063501b6dd4024b8ac2f04962d661222d120381272deea52e3fc52d3736"}, + {file = "pillow-10.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7c1ee6f42250df403c5f103cbd2768a28fe1a0ea1f0f03fe151c8741e1469c8b"}, + {file = "pillow-10.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b15e02e9bb4c21e39876698abf233c8c579127986f8207200bc8a8f6bb27acf2"}, + {file = "pillow-10.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a8d4bade9952ea9a77d0c3e49cbd8b2890a399422258a77f357b9cc9be8d680"}, + {file = "pillow-10.4.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:43efea75eb06b95d1631cb784aa40156177bf9dd5b4b03ff38979e048258bc6b"}, + {file = "pillow-10.4.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:950be4d8ba92aca4b2bb0741285a46bfae3ca699ef913ec8416c1b78eadd64cd"}, + {file = "pillow-10.4.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:d7480af14364494365e89d6fddc510a13e5a2c3584cb19ef65415ca57252fb84"}, + {file = "pillow-10.4.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:73664fe514b34c8f02452ffb73b7a92c6774e39a647087f83d67f010eb9a0cf0"}, + {file = "pillow-10.4.0-cp38-cp38-win32.whl", hash = "sha256:e88d5e6ad0d026fba7bdab8c3f225a69f063f116462c49892b0149e21b6c0a0e"}, + {file = "pillow-10.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:5161eef006d335e46895297f642341111945e2c1c899eb406882a6c61a4357ab"}, + {file = "pillow-10.4.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:0ae24a547e8b711ccaaf99c9ae3cd975470e1a30caa80a6aaee9a2f19c05701d"}, + {file = "pillow-10.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:298478fe4f77a4408895605f3482b6cc6222c018b2ce565c2b6b9c354ac3229b"}, + {file = "pillow-10.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:134ace6dc392116566980ee7436477d844520a26a4b1bd4053f6f47d096997fd"}, + {file = "pillow-10.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:930044bb7679ab003b14023138b50181899da3f25de50e9dbee23b61b4de2126"}, + {file = "pillow-10.4.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:c76e5786951e72ed3686e122d14c5d7012f16c8303a674d18cdcd6d89557fc5b"}, + {file = "pillow-10.4.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:b2724fdb354a868ddf9a880cb84d102da914e99119211ef7ecbdc613b8c96b3c"}, + {file = "pillow-10.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:dbc6ae66518ab3c5847659e9988c3b60dc94ffb48ef9168656e0019a93dbf8a1"}, + {file = "pillow-10.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:06b2f7898047ae93fad74467ec3d28fe84f7831370e3c258afa533f81ef7f3df"}, + {file = "pillow-10.4.0-cp39-cp39-win32.whl", hash = "sha256:7970285ab628a3779aecc35823296a7869f889b8329c16ad5a71e4901a3dc4ef"}, + {file = "pillow-10.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:961a7293b2457b405967af9c77dcaa43cc1a8cd50d23c532e62d48ab6cdd56f5"}, + {file = "pillow-10.4.0-cp39-cp39-win_arm64.whl", hash = "sha256:32cda9e3d601a52baccb2856b8ea1fc213c90b340c542dcef77140dfa3278a9e"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:5b4815f2e65b30f5fbae9dfffa8636d992d49705723fe86a3661806e069352d4"}, + {file = 
"pillow-10.4.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:8f0aef4ef59694b12cadee839e2ba6afeab89c0f39a3adc02ed51d109117b8da"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f4727572e2918acaa9077c919cbbeb73bd2b3ebcfe033b72f858fc9fbef0026"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ff25afb18123cea58a591ea0244b92eb1e61a1fd497bf6d6384f09bc3262ec3e"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:dc3e2db6ba09ffd7d02ae9141cfa0ae23393ee7687248d46a7507b75d610f4f5"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:02a2be69f9c9b8c1e97cf2713e789d4e398c751ecfd9967c18d0ce304efbf885"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:0755ffd4a0c6f267cccbae2e9903d95477ca2f77c4fcf3a3a09570001856c8a5"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:a02364621fe369e06200d4a16558e056fe2805d3468350df3aef21e00d26214b"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:1b5dea9831a90e9d0721ec417a80d4cbd7022093ac38a568db2dd78363b00908"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9b885f89040bb8c4a1573566bbb2f44f5c505ef6e74cec7ab9068c900047f04b"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87dd88ded2e6d74d31e1e0a99a726a6765cda32d00ba72dc37f0651f306daaa8"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:2db98790afc70118bd0255c2eeb465e9767ecf1f3c25f9a1abb8ffc8cfd1fe0a"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:f7baece4ce06bade126fb84b8af1c33439a76d8a6fd818970215e0560ca28c27"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:cfdd747216947628af7b259d274771d84db2268ca062dd5faf373639d00113a3"}, + {file = "pillow-10.4.0.tar.gz", hash = "sha256:166c1cd4d24309b30d61f79f4a9114b7b2313d7450912277855ff5dfd7cd4a06"}, +] + +[package.extras] +docs = ["furo", "olefile", "sphinx (>=7.3)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinxext-opengraph"] +fpx = ["olefile"] +mic = ["olefile"] +tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] +typing = ["typing-extensions ; python_version < \"3.10\""] +xmp = ["defusedxml"] + +[[package]] +name = "pillow" +version = "11.2.1" +description = "Python Imaging Library (Fork)" +optional = false +python-versions = ">=3.9" +groups = ["main"] +markers = "python_version >= \"3.10\"" +files = [ + {file = "pillow-11.2.1-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:d57a75d53922fc20c165016a20d9c44f73305e67c351bbc60d1adaf662e74047"}, + {file = "pillow-11.2.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:127bf6ac4a5b58b3d32fc8289656f77f80567d65660bc46f72c0d77e6600cc95"}, + {file = "pillow-11.2.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b4ba4be812c7a40280629e55ae0b14a0aafa150dd6451297562e1764808bbe61"}, + {file = "pillow-11.2.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c8bd62331e5032bc396a93609982a9ab6b411c05078a52f5fe3cc59234a3abd1"}, + {file = "pillow-11.2.1-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:562d11134c97a62fe3af29581f083033179f7ff435f78392565a1ad2d1c2c45c"}, 
+ {file = "pillow-11.2.1-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:c97209e85b5be259994eb5b69ff50c5d20cca0f458ef9abd835e262d9d88b39d"}, + {file = "pillow-11.2.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:0c3e6d0f59171dfa2e25d7116217543310908dfa2770aa64b8f87605f8cacc97"}, + {file = "pillow-11.2.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:cc1c3bc53befb6096b84165956e886b1729634a799e9d6329a0c512ab651e579"}, + {file = "pillow-11.2.1-cp310-cp310-win32.whl", hash = "sha256:312c77b7f07ab2139924d2639860e084ec2a13e72af54d4f08ac843a5fc9c79d"}, + {file = "pillow-11.2.1-cp310-cp310-win_amd64.whl", hash = "sha256:9bc7ae48b8057a611e5fe9f853baa88093b9a76303937449397899385da06fad"}, + {file = "pillow-11.2.1-cp310-cp310-win_arm64.whl", hash = "sha256:2728567e249cdd939f6cc3d1f049595c66e4187f3c34078cbc0a7d21c47482d2"}, + {file = "pillow-11.2.1-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:35ca289f712ccfc699508c4658a1d14652e8033e9b69839edf83cbdd0ba39e70"}, + {file = "pillow-11.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e0409af9f829f87a2dfb7e259f78f317a5351f2045158be321fd135973fff7bf"}, + {file = "pillow-11.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4e5c5edee874dce4f653dbe59db7c73a600119fbea8d31f53423586ee2aafd7"}, + {file = "pillow-11.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b93a07e76d13bff9444f1a029e0af2964e654bfc2e2c2d46bfd080df5ad5f3d8"}, + {file = "pillow-11.2.1-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:e6def7eed9e7fa90fde255afaf08060dc4b343bbe524a8f69bdd2a2f0018f600"}, + {file = "pillow-11.2.1-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:8f4f3724c068be008c08257207210c138d5f3731af6c155a81c2b09a9eb3a788"}, + {file = "pillow-11.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:a0a6709b47019dff32e678bc12c63008311b82b9327613f534e496dacaefb71e"}, + {file = "pillow-11.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f6b0c664ccb879109ee3ca702a9272d877f4fcd21e5eb63c26422fd6e415365e"}, + {file = "pillow-11.2.1-cp311-cp311-win32.whl", hash = "sha256:cc5d875d56e49f112b6def6813c4e3d3036d269c008bf8aef72cd08d20ca6df6"}, + {file = "pillow-11.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:0f5c7eda47bf8e3c8a283762cab94e496ba977a420868cb819159980b6709193"}, + {file = "pillow-11.2.1-cp311-cp311-win_arm64.whl", hash = "sha256:4d375eb838755f2528ac8cbc926c3e31cc49ca4ad0cf79cff48b20e30634a4a7"}, + {file = "pillow-11.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:78afba22027b4accef10dbd5eed84425930ba41b3ea0a86fa8d20baaf19d807f"}, + {file = "pillow-11.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:78092232a4ab376a35d68c4e6d5e00dfd73454bd12b230420025fbe178ee3b0b"}, + {file = "pillow-11.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25a5f306095c6780c52e6bbb6109624b95c5b18e40aab1c3041da3e9e0cd3e2d"}, + {file = "pillow-11.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c7b29dbd4281923a2bfe562acb734cee96bbb129e96e6972d315ed9f232bef4"}, + {file = "pillow-11.2.1-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:3e645b020f3209a0181a418bffe7b4a93171eef6c4ef6cc20980b30bebf17b7d"}, + {file = "pillow-11.2.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:b2dbea1012ccb784a65349f57bbc93730b96e85b42e9bf7b01ef40443db720b4"}, + {file = "pillow-11.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:da3104c57bbd72948d75f6a9389e6727d2ab6333c3617f0a89d72d4940aa0443"}, + 
{file = "pillow-11.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:598174aef4589af795f66f9caab87ba4ff860ce08cd5bb447c6fc553ffee603c"}, + {file = "pillow-11.2.1-cp312-cp312-win32.whl", hash = "sha256:1d535df14716e7f8776b9e7fee118576d65572b4aad3ed639be9e4fa88a1cad3"}, + {file = "pillow-11.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:14e33b28bf17c7a38eede290f77db7c664e4eb01f7869e37fa98a5aa95978941"}, + {file = "pillow-11.2.1-cp312-cp312-win_arm64.whl", hash = "sha256:21e1470ac9e5739ff880c211fc3af01e3ae505859392bf65458c224d0bf283eb"}, + {file = "pillow-11.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:fdec757fea0b793056419bca3e9932eb2b0ceec90ef4813ea4c1e072c389eb28"}, + {file = "pillow-11.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:b0e130705d568e2f43a17bcbe74d90958e8a16263868a12c3e0d9c8162690830"}, + {file = "pillow-11.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7bdb5e09068332578214cadd9c05e3d64d99e0e87591be22a324bdbc18925be0"}, + {file = "pillow-11.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d189ba1bebfbc0c0e529159631ec72bb9e9bc041f01ec6d3233d6d82eb823bc1"}, + {file = "pillow-11.2.1-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:191955c55d8a712fab8934a42bfefbf99dd0b5875078240943f913bb66d46d9f"}, + {file = "pillow-11.2.1-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:ad275964d52e2243430472fc5d2c2334b4fc3ff9c16cb0a19254e25efa03a155"}, + {file = "pillow-11.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:750f96efe0597382660d8b53e90dd1dd44568a8edb51cb7f9d5d918b80d4de14"}, + {file = "pillow-11.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fe15238d3798788d00716637b3d4e7bb6bde18b26e5d08335a96e88564a36b6b"}, + {file = "pillow-11.2.1-cp313-cp313-win32.whl", hash = "sha256:3fe735ced9a607fee4f481423a9c36701a39719252a9bb251679635f99d0f7d2"}, + {file = "pillow-11.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:74ee3d7ecb3f3c05459ba95eed5efa28d6092d751ce9bf20e3e253a4e497e691"}, + {file = "pillow-11.2.1-cp313-cp313-win_arm64.whl", hash = "sha256:5119225c622403afb4b44bad4c1ca6c1f98eed79db8d3bc6e4e160fc6339d66c"}, + {file = "pillow-11.2.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:8ce2e8411c7aaef53e6bb29fe98f28cd4fbd9a1d9be2eeea434331aac0536b22"}, + {file = "pillow-11.2.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:9ee66787e095127116d91dea2143db65c7bb1e232f617aa5957c0d9d2a3f23a7"}, + {file = "pillow-11.2.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9622e3b6c1d8b551b6e6f21873bdcc55762b4b2126633014cea1803368a9aa16"}, + {file = "pillow-11.2.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63b5dff3a68f371ea06025a1a6966c9a1e1ee452fc8020c2cd0ea41b83e9037b"}, + {file = "pillow-11.2.1-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:31df6e2d3d8fc99f993fd253e97fae451a8db2e7207acf97859732273e108406"}, + {file = "pillow-11.2.1-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:062b7a42d672c45a70fa1f8b43d1d38ff76b63421cbbe7f88146b39e8a558d91"}, + {file = "pillow-11.2.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:4eb92eca2711ef8be42fd3f67533765d9fd043b8c80db204f16c8ea62ee1a751"}, + {file = "pillow-11.2.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:f91ebf30830a48c825590aede79376cb40f110b387c17ee9bd59932c961044f9"}, + {file = "pillow-11.2.1-cp313-cp313t-win32.whl", hash = "sha256:e0b55f27f584ed623221cfe995c912c61606be8513bfa0e07d2c674b4516d9dd"}, + {file = 
"pillow-11.2.1-cp313-cp313t-win_amd64.whl", hash = "sha256:36d6b82164c39ce5482f649b437382c0fb2395eabc1e2b1702a6deb8ad647d6e"}, + {file = "pillow-11.2.1-cp313-cp313t-win_arm64.whl", hash = "sha256:225c832a13326e34f212d2072982bb1adb210e0cc0b153e688743018c94a2681"}, + {file = "pillow-11.2.1-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:7491cf8a79b8eb867d419648fff2f83cb0b3891c8b36da92cc7f1931d46108c8"}, + {file = "pillow-11.2.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8b02d8f9cb83c52578a0b4beadba92e37d83a4ef11570a8688bbf43f4ca50909"}, + {file = "pillow-11.2.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:014ca0050c85003620526b0ac1ac53f56fc93af128f7546623cc8e31875ab928"}, + {file = "pillow-11.2.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3692b68c87096ac6308296d96354eddd25f98740c9d2ab54e1549d6c8aea9d79"}, + {file = "pillow-11.2.1-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:f781dcb0bc9929adc77bad571b8621ecb1e4cdef86e940fe2e5b5ee24fd33b35"}, + {file = "pillow-11.2.1-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:2b490402c96f907a166615e9a5afacf2519e28295f157ec3a2bb9bd57de638cb"}, + {file = "pillow-11.2.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:dd6b20b93b3ccc9c1b597999209e4bc5cf2853f9ee66e3fc9a400a78733ffc9a"}, + {file = "pillow-11.2.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:4b835d89c08a6c2ee7781b8dd0a30209a8012b5f09c0a665b65b0eb3560b6f36"}, + {file = "pillow-11.2.1-cp39-cp39-win32.whl", hash = "sha256:b10428b3416d4f9c61f94b494681280be7686bda15898a3a9e08eb66a6d92d67"}, + {file = "pillow-11.2.1-cp39-cp39-win_amd64.whl", hash = "sha256:6ebce70c3f486acf7591a3d73431fa504a4e18a9b97ff27f5f47b7368e4b9dd1"}, + {file = "pillow-11.2.1-cp39-cp39-win_arm64.whl", hash = "sha256:c27476257b2fdcd7872d54cfd119b3a9ce4610fb85c8e32b70b42e3680a29a1e"}, + {file = "pillow-11.2.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:9b7b0d4fd2635f54ad82785d56bc0d94f147096493a79985d0ab57aedd563156"}, + {file = "pillow-11.2.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:aa442755e31c64037aa7c1cb186e0b369f8416c567381852c63444dd666fb772"}, + {file = "pillow-11.2.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f0d3348c95b766f54b76116d53d4cb171b52992a1027e7ca50c81b43b9d9e363"}, + {file = "pillow-11.2.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85d27ea4c889342f7e35f6d56e7e1cb345632ad592e8c51b693d7b7556043ce0"}, + {file = "pillow-11.2.1-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:bf2c33d6791c598142f00c9c4c7d47f6476731c31081331664eb26d6ab583e01"}, + {file = "pillow-11.2.1-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e616e7154c37669fc1dfc14584f11e284e05d1c650e1c0f972f281c4ccc53193"}, + {file = "pillow-11.2.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:39ad2e0f424394e3aebc40168845fee52df1394a4673a6ee512d840d14ab3013"}, + {file = "pillow-11.2.1-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:80f1df8dbe9572b4b7abdfa17eb5d78dd620b1d55d9e25f834efdbee872d3aed"}, + {file = "pillow-11.2.1-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:ea926cfbc3957090becbcbbb65ad177161a2ff2ad578b5a6ec9bb1e1cd78753c"}, + {file = "pillow-11.2.1-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:738db0e0941ca0376804d4de6a782c005245264edaa253ffce24e5a15cbdc7bd"}, + {file = "pillow-11.2.1-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", 
hash = "sha256:9db98ab6565c69082ec9b0d4e40dd9f6181dab0dd236d26f7a50b8b9bfbd5076"}, + {file = "pillow-11.2.1-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:036e53f4170e270ddb8797d4c590e6dd14d28e15c7da375c18978045f7e6c37b"}, + {file = "pillow-11.2.1-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:14f73f7c291279bd65fda51ee87affd7c1e097709f7fdd0188957a16c264601f"}, + {file = "pillow-11.2.1-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:208653868d5c9ecc2b327f9b9ef34e0e42a4cdd172c2988fd81d62d2bc9bc044"}, + {file = "pillow-11.2.1.tar.gz", hash = "sha256:a64dd61998416367b7ef979b73d3a85853ba9bec4c2925f74e588879a58716b6"}, +] + +[package.extras] +docs = ["furo", "olefile", "sphinx (>=8.2)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinxext-opengraph"] +fpx = ["olefile"] +mic = ["olefile"] +test-arrow = ["pyarrow"] +tests = ["check-manifest", "coverage (>=7.4.2)", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout", "trove-classifiers (>=2024.10.12)"] +typing = ["typing-extensions ; python_version < \"3.10\""] +xmp = ["defusedxml"] + +[[package]] +name = "platformdirs" +version = "4.3.6" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." +optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "python_version < \"3.10\"" +files = [ + {file = "platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb"}, + {file = "platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907"}, +] + +[package.extras] +docs = ["furo (>=2024.8.6)", "proselint (>=0.14)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=8.3.2)", "pytest-cov (>=5)", "pytest-mock (>=3.14)"] +type = ["mypy (>=1.11.2)"] + +[[package]] +name = "platformdirs" +version = "4.3.8" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." 
+optional = false +python-versions = ">=3.9" +groups = ["main"] +markers = "python_version >= \"3.10\"" +files = [ + {file = "platformdirs-4.3.8-py3-none-any.whl", hash = "sha256:ff7059bb7eb1179e2685604f4aaf157cfd9535242bd23742eadc3c13542139b4"}, + {file = "platformdirs-4.3.8.tar.gz", hash = "sha256:3d512d96e16bcb959a814c9f348431070822a6496326a4be0911c40b5a74c2bc"}, +] + +[package.extras] +docs = ["furo (>=2024.8.6)", "proselint (>=0.14)", "sphinx (>=8.1.3)", "sphinx-autodoc-typehints (>=3)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=8.3.4)", "pytest-cov (>=6)", "pytest-mock (>=3.14)"] +type = ["mypy (>=1.14.1)"] + +[[package]] +name = "pluggy" +version = "1.5.0" +description = "plugin and hook calling mechanisms for python" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +markers = "python_version < \"3.10\"" +files = [ + {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, + {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, +] + +[package.extras] +dev = ["pre-commit", "tox"] +testing = ["pytest", "pytest-benchmark"] + +[[package]] +name = "pluggy" +version = "1.6.0" +description = "plugin and hook calling mechanisms for python" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +markers = "python_version >= \"3.10\"" +files = [ + {file = "pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746"}, + {file = "pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3"}, +] + +[package.extras] +dev = ["pre-commit", "tox"] +testing = ["coverage", "pytest", "pytest-benchmark"] + +[[package]] +name = "posthog" +version = "4.2.0" +description = "Integrate PostHog into any python application." +optional = false +python-versions = "*" +groups = ["main"] +markers = "python_version < \"3.10\"" +files = [ + {file = "posthog-4.2.0-py2.py3-none-any.whl", hash = "sha256:60c7066caac43e43e326e9196d8c1aadeafc8b0be9e5c108446e352711fa456b"}, + {file = "posthog-4.2.0.tar.gz", hash = "sha256:c4abc95de03294be005b3b7e8735e9d7abab88583da26262112bacce64b0c3b5"}, +] + +[package.dependencies] +backoff = ">=1.10.0" +distro = ">=1.5.0" +python-dateutil = ">=2.2" +requests = ">=2.7,<3.0" +six = ">=1.5" + +[package.extras] +dev = ["black", "django-stubs", "flake8", "flake8-print", "isort", "lxml", "mypy", "mypy-baseline", "pre-commit", "pydantic", "types-mock", "types-python-dateutil", "types-requests", "types-setuptools", "types-six"] +langchain = ["langchain (>=0.2.0)"] +sentry = ["django", "sentry-sdk"] +test = ["anthropic", "coverage", "django", "flake8", "freezegun (==1.5.1)", "google-genai", "langchain-anthropic (>=0.2.0)", "langchain-community (>=0.2.0)", "langchain-openai (>=0.2.0)", "langgraph", "mock (>=2.0.0)", "openai", "parameterized (>=0.8.1)", "pydantic", "pylint", "pytest", "pytest-asyncio", "pytest-timeout"] + +[[package]] +name = "posthog" +version = "5.4.0" +description = "Integrate PostHog into any python application." 
+optional = false +python-versions = ">=3.9" +groups = ["main"] +markers = "python_version >= \"3.10\"" +files = [ + {file = "posthog-5.4.0-py3-none-any.whl", hash = "sha256:284dfa302f64353484420b52d4ad81ff5c2c2d1d607c4e2db602ac72761831bd"}, + {file = "posthog-5.4.0.tar.gz", hash = "sha256:701669261b8d07cdde0276e5bc096b87f9e200e3b9589c5ebff14df658c5893c"}, +] + +[package.dependencies] +backoff = ">=1.10.0" +distro = ">=1.5.0" +python-dateutil = ">=2.2" +requests = ">=2.7,<3.0" +six = ">=1.5" + +[package.extras] +dev = ["django-stubs", "lxml", "mypy", "mypy-baseline", "packaging", "pre-commit", "pydantic", "ruff", "setuptools", "tomli", "tomli_w", "twine", "types-mock", "types-python-dateutil", "types-requests", "types-setuptools", "types-six", "wheel"] +langchain = ["langchain (>=0.2.0)"] +test = ["anthropic", "coverage", "django", "freezegun (==1.5.1)", "google-genai", "langchain-anthropic (>=0.3.15)", "langchain-community (>=0.3.25)", "langchain-core (>=0.3.65)", "langchain-openai (>=0.3.22)", "langgraph (>=0.4.8)", "mock (>=2.0.0)", "openai", "parameterized (>=0.8.1)", "pydantic", "pytest", "pytest-asyncio", "pytest-timeout"] + +[[package]] +name = "propcache" +version = "0.2.0" +description = "Accelerated property cache" +optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "python_version < \"3.10\"" +files = [ + {file = "propcache-0.2.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:c5869b8fd70b81835a6f187c5fdbe67917a04d7e52b6e7cc4e5fe39d55c39d58"}, + {file = "propcache-0.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:952e0d9d07609d9c5be361f33b0d6d650cd2bae393aabb11d9b719364521984b"}, + {file = "propcache-0.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:33ac8f098df0585c0b53009f039dfd913b38c1d2edafed0cedcc0c32a05aa110"}, + {file = "propcache-0.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:97e48e8875e6c13909c800fa344cd54cc4b2b0db1d5f911f840458a500fde2c2"}, + {file = "propcache-0.2.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:388f3217649d6d59292b722d940d4d2e1e6a7003259eb835724092a1cca0203a"}, + {file = "propcache-0.2.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f571aea50ba5623c308aa146eb650eebf7dbe0fd8c5d946e28343cb3b5aad577"}, + {file = "propcache-0.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3dfafb44f7bb35c0c06eda6b2ab4bfd58f02729e7c4045e179f9a861b07c9850"}, + {file = "propcache-0.2.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a3ebe9a75be7ab0b7da2464a77bb27febcb4fab46a34f9288f39d74833db7f61"}, + {file = "propcache-0.2.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d2f0d0f976985f85dfb5f3d685697ef769faa6b71993b46b295cdbbd6be8cc37"}, + {file = "propcache-0.2.0-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:a3dc1a4b165283bd865e8f8cb5f0c64c05001e0718ed06250d8cac9bec115b48"}, + {file = "propcache-0.2.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:9e0f07b42d2a50c7dd2d8675d50f7343d998c64008f1da5fef888396b7f84630"}, + {file = "propcache-0.2.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:e63e3e1e0271f374ed489ff5ee73d4b6e7c60710e1f76af5f0e1a6117cd26394"}, + {file = "propcache-0.2.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:56bb5c98f058a41bb58eead194b4db8c05b088c93d94d5161728515bd52b052b"}, + {file = "propcache-0.2.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = 
"sha256:7665f04d0c7f26ff8bb534e1c65068409bf4687aa2534faf7104d7182debb336"}, + {file = "propcache-0.2.0-cp310-cp310-win32.whl", hash = "sha256:7cf18abf9764746b9c8704774d8b06714bcb0a63641518a3a89c7f85cc02c2ad"}, + {file = "propcache-0.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:cfac69017ef97db2438efb854edf24f5a29fd09a536ff3a992b75990720cdc99"}, + {file = "propcache-0.2.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:63f13bf09cc3336eb04a837490b8f332e0db41da66995c9fd1ba04552e516354"}, + {file = "propcache-0.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:608cce1da6f2672a56b24a015b42db4ac612ee709f3d29f27a00c943d9e851de"}, + {file = "propcache-0.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:466c219deee4536fbc83c08d09115249db301550625c7fef1c5563a584c9bc87"}, + {file = "propcache-0.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc2db02409338bf36590aa985a461b2c96fce91f8e7e0f14c50c5fcc4f229016"}, + {file = "propcache-0.2.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a6ed8db0a556343d566a5c124ee483ae113acc9a557a807d439bcecc44e7dfbb"}, + {file = "propcache-0.2.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:91997d9cb4a325b60d4e3f20967f8eb08dfcb32b22554d5ef78e6fd1dda743a2"}, + {file = "propcache-0.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c7dde9e533c0a49d802b4f3f218fa9ad0a1ce21f2c2eb80d5216565202acab4"}, + {file = "propcache-0.2.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffcad6c564fe6b9b8916c1aefbb37a362deebf9394bd2974e9d84232e3e08504"}, + {file = "propcache-0.2.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:97a58a28bcf63284e8b4d7b460cbee1edaab24634e82059c7b8c09e65284f178"}, + {file = "propcache-0.2.0-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:945db8ee295d3af9dbdbb698cce9bbc5c59b5c3fe328bbc4387f59a8a35f998d"}, + {file = "propcache-0.2.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:39e104da444a34830751715f45ef9fc537475ba21b7f1f5b0f4d71a3b60d7fe2"}, + {file = "propcache-0.2.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:c5ecca8f9bab618340c8e848d340baf68bcd8ad90a8ecd7a4524a81c1764b3db"}, + {file = "propcache-0.2.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:c436130cc779806bdf5d5fae0d848713105472b8566b75ff70048c47d3961c5b"}, + {file = "propcache-0.2.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:191db28dc6dcd29d1a3e063c3be0b40688ed76434622c53a284e5427565bbd9b"}, + {file = "propcache-0.2.0-cp311-cp311-win32.whl", hash = "sha256:5f2564ec89058ee7c7989a7b719115bdfe2a2fb8e7a4543b8d1c0cc4cf6478c1"}, + {file = "propcache-0.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:6e2e54267980349b723cff366d1e29b138b9a60fa376664a157a342689553f71"}, + {file = "propcache-0.2.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:2ee7606193fb267be4b2e3b32714f2d58cad27217638db98a60f9efb5efeccc2"}, + {file = "propcache-0.2.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:91ee8fc02ca52e24bcb77b234f22afc03288e1dafbb1f88fe24db308910c4ac7"}, + {file = "propcache-0.2.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2e900bad2a8456d00a113cad8c13343f3b1f327534e3589acc2219729237a2e8"}, + {file = "propcache-0.2.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f52a68c21363c45297aca15561812d542f8fc683c85201df0bebe209e349f793"}, + {file = "propcache-0.2.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", 
hash = "sha256:1e41d67757ff4fbc8ef2af99b338bfb955010444b92929e9e55a6d4dcc3c4f09"}, + {file = "propcache-0.2.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a64e32f8bd94c105cc27f42d3b658902b5bcc947ece3c8fe7bc1b05982f60e89"}, + {file = "propcache-0.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:55346705687dbd7ef0d77883ab4f6fabc48232f587925bdaf95219bae072491e"}, + {file = "propcache-0.2.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:00181262b17e517df2cd85656fcd6b4e70946fe62cd625b9d74ac9977b64d8d9"}, + {file = "propcache-0.2.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6994984550eaf25dd7fc7bd1b700ff45c894149341725bb4edc67f0ffa94efa4"}, + {file = "propcache-0.2.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:56295eb1e5f3aecd516d91b00cfd8bf3a13991de5a479df9e27dd569ea23959c"}, + {file = "propcache-0.2.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:439e76255daa0f8151d3cb325f6dd4a3e93043e6403e6491813bcaaaa8733887"}, + {file = "propcache-0.2.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:f6475a1b2ecb310c98c28d271a30df74f9dd436ee46d09236a6b750a7599ce57"}, + {file = "propcache-0.2.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:3444cdba6628accf384e349014084b1cacd866fbb88433cd9d279d90a54e0b23"}, + {file = "propcache-0.2.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:4a9d9b4d0a9b38d1c391bb4ad24aa65f306c6f01b512e10a8a34a2dc5675d348"}, + {file = "propcache-0.2.0-cp312-cp312-win32.whl", hash = "sha256:69d3a98eebae99a420d4b28756c8ce6ea5a29291baf2dc9ff9414b42676f61d5"}, + {file = "propcache-0.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:ad9c9b99b05f163109466638bd30ada1722abb01bbb85c739c50b6dc11f92dc3"}, + {file = "propcache-0.2.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ecddc221a077a8132cf7c747d5352a15ed763b674c0448d811f408bf803d9ad7"}, + {file = "propcache-0.2.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0e53cb83fdd61cbd67202735e6a6687a7b491c8742dfc39c9e01e80354956763"}, + {file = "propcache-0.2.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:92fe151145a990c22cbccf9ae15cae8ae9eddabfc949a219c9f667877e40853d"}, + {file = "propcache-0.2.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d6a21ef516d36909931a2967621eecb256018aeb11fc48656e3257e73e2e247a"}, + {file = "propcache-0.2.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3f88a4095e913f98988f5b338c1d4d5d07dbb0b6bad19892fd447484e483ba6b"}, + {file = "propcache-0.2.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5a5b3bb545ead161be780ee85a2b54fdf7092815995661947812dde94a40f6fb"}, + {file = "propcache-0.2.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67aeb72e0f482709991aa91345a831d0b707d16b0257e8ef88a2ad246a7280bf"}, + {file = "propcache-0.2.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3c997f8c44ec9b9b0bcbf2d422cc00a1d9b9c681f56efa6ca149a941e5560da2"}, + {file = "propcache-0.2.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:2a66df3d4992bc1d725b9aa803e8c5a66c010c65c741ad901e260ece77f58d2f"}, + {file = "propcache-0.2.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:3ebbcf2a07621f29638799828b8d8668c421bfb94c6cb04269130d8de4fb7136"}, + {file = "propcache-0.2.0-cp313-cp313-musllinux_1_2_i686.whl", hash = 
"sha256:1235c01ddaa80da8235741e80815ce381c5267f96cc49b1477fdcf8c047ef325"}, + {file = "propcache-0.2.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:3947483a381259c06921612550867b37d22e1df6d6d7e8361264b6d037595f44"}, + {file = "propcache-0.2.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:d5bed7f9805cc29c780f3aee05de3262ee7ce1f47083cfe9f77471e9d6777e83"}, + {file = "propcache-0.2.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e4a91d44379f45f5e540971d41e4626dacd7f01004826a18cb048e7da7e96544"}, + {file = "propcache-0.2.0-cp313-cp313-win32.whl", hash = "sha256:f902804113e032e2cdf8c71015651c97af6418363bea8d78dc0911d56c335032"}, + {file = "propcache-0.2.0-cp313-cp313-win_amd64.whl", hash = "sha256:8f188cfcc64fb1266f4684206c9de0e80f54622c3f22a910cbd200478aeae61e"}, + {file = "propcache-0.2.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:53d1bd3f979ed529f0805dd35ddaca330f80a9a6d90bc0121d2ff398f8ed8861"}, + {file = "propcache-0.2.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:83928404adf8fb3d26793665633ea79b7361efa0287dfbd372a7e74311d51ee6"}, + {file = "propcache-0.2.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:77a86c261679ea5f3896ec060be9dc8e365788248cc1e049632a1be682442063"}, + {file = "propcache-0.2.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:218db2a3c297a3768c11a34812e63b3ac1c3234c3a086def9c0fee50d35add1f"}, + {file = "propcache-0.2.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7735e82e3498c27bcb2d17cb65d62c14f1100b71723b68362872bca7d0913d90"}, + {file = "propcache-0.2.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:20a617c776f520c3875cf4511e0d1db847a076d720714ae35ffe0df3e440be68"}, + {file = "propcache-0.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67b69535c870670c9f9b14a75d28baa32221d06f6b6fa6f77a0a13c5a7b0a5b9"}, + {file = "propcache-0.2.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4569158070180c3855e9c0791c56be3ceeb192defa2cdf6a3f39e54319e56b89"}, + {file = "propcache-0.2.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:db47514ffdbd91ccdc7e6f8407aac4ee94cc871b15b577c1c324236b013ddd04"}, + {file = "propcache-0.2.0-cp38-cp38-musllinux_1_2_armv7l.whl", hash = "sha256:2a60ad3e2553a74168d275a0ef35e8c0a965448ffbc3b300ab3a5bb9956c2162"}, + {file = "propcache-0.2.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:662dd62358bdeaca0aee5761de8727cfd6861432e3bb828dc2a693aa0471a563"}, + {file = "propcache-0.2.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:25a1f88b471b3bc911d18b935ecb7115dff3a192b6fef46f0bfaf71ff4f12418"}, + {file = "propcache-0.2.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:f60f0ac7005b9f5a6091009b09a419ace1610e163fa5deaba5ce3484341840e7"}, + {file = "propcache-0.2.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:74acd6e291f885678631b7ebc85d2d4aec458dd849b8c841b57ef04047833bed"}, + {file = "propcache-0.2.0-cp38-cp38-win32.whl", hash = "sha256:d9b6ddac6408194e934002a69bcaadbc88c10b5f38fb9307779d1c629181815d"}, + {file = "propcache-0.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:676135dcf3262c9c5081cc8f19ad55c8a64e3f7282a21266d05544450bffc3a5"}, + {file = "propcache-0.2.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:25c8d773a62ce0451b020c7b29a35cfbc05de8b291163a7a0f3b7904f27253e6"}, + {file = "propcache-0.2.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:375a12d7556d462dc64d70475a9ee5982465fbb3d2b364f16b86ba9135793638"}, 
+ {file = "propcache-0.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1ec43d76b9677637a89d6ab86e1fef70d739217fefa208c65352ecf0282be957"}, + {file = "propcache-0.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f45eec587dafd4b2d41ac189c2156461ebd0c1082d2fe7013571598abb8505d1"}, + {file = "propcache-0.2.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bc092ba439d91df90aea38168e11f75c655880c12782facf5cf9c00f3d42b562"}, + {file = "propcache-0.2.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fa1076244f54bb76e65e22cb6910365779d5c3d71d1f18b275f1dfc7b0d71b4d"}, + {file = "propcache-0.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:682a7c79a2fbf40f5dbb1eb6bfe2cd865376deeac65acf9beb607505dced9e12"}, + {file = "propcache-0.2.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8e40876731f99b6f3c897b66b803c9e1c07a989b366c6b5b475fafd1f7ba3fb8"}, + {file = "propcache-0.2.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:363ea8cd3c5cb6679f1c2f5f1f9669587361c062e4899fce56758efa928728f8"}, + {file = "propcache-0.2.0-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:140fbf08ab3588b3468932974a9331aff43c0ab8a2ec2c608b6d7d1756dbb6cb"}, + {file = "propcache-0.2.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:e70fac33e8b4ac63dfc4c956fd7d85a0b1139adcfc0d964ce288b7c527537fea"}, + {file = "propcache-0.2.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:b33d7a286c0dc1a15f5fc864cc48ae92a846df287ceac2dd499926c3801054a6"}, + {file = "propcache-0.2.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:f6d5749fdd33d90e34c2efb174c7e236829147a2713334d708746e94c4bde40d"}, + {file = "propcache-0.2.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:22aa8f2272d81d9317ff5756bb108021a056805ce63dd3630e27d042c8092798"}, + {file = "propcache-0.2.0-cp39-cp39-win32.whl", hash = "sha256:73e4b40ea0eda421b115248d7e79b59214411109a5bc47d0d48e4c73e3b8fcf9"}, + {file = "propcache-0.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:9517d5e9e0731957468c29dbfd0f976736a0e55afaea843726e887f36fe017df"}, + {file = "propcache-0.2.0-py3-none-any.whl", hash = "sha256:2ccc28197af5313706511fab3a8b66dcd6da067a1331372c82ea1cb74285e036"}, + {file = "propcache-0.2.0.tar.gz", hash = "sha256:df81779732feb9d01e5d513fad0122efb3d53bbc75f61b2a4f29a020bc985e70"}, +] + +[[package]] +name = "propcache" +version = "0.3.2" +description = "Accelerated property cache" +optional = false +python-versions = ">=3.9" +groups = ["main"] +markers = "python_version >= \"3.10\"" +files = [ + {file = "propcache-0.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:22d9962a358aedbb7a2e36187ff273adeaab9743373a272976d2e348d08c7770"}, + {file = "propcache-0.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0d0fda578d1dc3f77b6b5a5dce3b9ad69a8250a891760a548df850a5e8da87f3"}, + {file = "propcache-0.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3def3da3ac3ce41562d85db655d18ebac740cb3fa4367f11a52b3da9d03a5cc3"}, + {file = "propcache-0.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9bec58347a5a6cebf239daba9bda37dffec5b8d2ce004d9fe4edef3d2815137e"}, + {file = "propcache-0.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:55ffda449a507e9fbd4aca1a7d9aa6753b07d6166140e5a18d2ac9bc49eac220"}, + {file = "propcache-0.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:64a67fb39229a8a8491dd42f864e5e263155e729c2e7ff723d6e25f596b1e8cb"}, + {file = "propcache-0.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9da1cf97b92b51253d5b68cf5a2b9e0dafca095e36b7f2da335e27dc6172a614"}, + {file = "propcache-0.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5f559e127134b07425134b4065be45b166183fdcb433cb6c24c8e4149056ad50"}, + {file = "propcache-0.3.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:aff2e4e06435d61f11a428360a932138d0ec288b0a31dd9bd78d200bd4a2b339"}, + {file = "propcache-0.3.2-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:4927842833830942a5d0a56e6f4839bc484785b8e1ce8d287359794818633ba0"}, + {file = "propcache-0.3.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:6107ddd08b02654a30fb8ad7a132021759d750a82578b94cd55ee2772b6ebea2"}, + {file = "propcache-0.3.2-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:70bd8b9cd6b519e12859c99f3fc9a93f375ebd22a50296c3a295028bea73b9e7"}, + {file = "propcache-0.3.2-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:2183111651d710d3097338dd1893fcf09c9f54e27ff1a8795495a16a469cc90b"}, + {file = "propcache-0.3.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:fb075ad271405dcad8e2a7ffc9a750a3bf70e533bd86e89f0603e607b93aa64c"}, + {file = "propcache-0.3.2-cp310-cp310-win32.whl", hash = "sha256:404d70768080d3d3bdb41d0771037da19d8340d50b08e104ca0e7f9ce55fce70"}, + {file = "propcache-0.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:7435d766f978b4ede777002e6b3b6641dd229cd1da8d3d3106a45770365f9ad9"}, + {file = "propcache-0.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0b8d2f607bd8f80ddc04088bc2a037fdd17884a6fcadc47a96e334d72f3717be"}, + {file = "propcache-0.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:06766d8f34733416e2e34f46fea488ad5d60726bb9481d3cddf89a6fa2d9603f"}, + {file = "propcache-0.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a2dc1f4a1df4fecf4e6f68013575ff4af84ef6f478fe5344317a65d38a8e6dc9"}, + {file = "propcache-0.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:be29c4f4810c5789cf10ddf6af80b041c724e629fa51e308a7a0fb19ed1ef7bf"}, + {file = "propcache-0.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:59d61f6970ecbd8ff2e9360304d5c8876a6abd4530cb752c06586849ac8a9dc9"}, + {file = "propcache-0.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:62180e0b8dbb6b004baec00a7983e4cc52f5ada9cd11f48c3528d8cfa7b96a66"}, + {file = "propcache-0.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c144ca294a204c470f18cf4c9d78887810d04a3e2fbb30eea903575a779159df"}, + {file = "propcache-0.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c5c2a784234c28854878d68978265617aa6dc0780e53d44b4d67f3651a17a9a2"}, + {file = "propcache-0.3.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:5745bc7acdafa978ca1642891b82c19238eadc78ba2aaa293c6863b304e552d7"}, + {file = "propcache-0.3.2-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:c0075bf773d66fa8c9d41f66cc132ecc75e5bb9dd7cce3cfd14adc5ca184cb95"}, + {file = "propcache-0.3.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5f57aa0847730daceff0497f417c9de353c575d8da3579162cc74ac294c5369e"}, + {file = "propcache-0.3.2-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:eef914c014bf72d18efb55619447e0aecd5fb7c2e3fa7441e2e5d6099bddff7e"}, + {file = 
"propcache-0.3.2-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:2a4092e8549031e82facf3decdbc0883755d5bbcc62d3aea9d9e185549936dcf"}, + {file = "propcache-0.3.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:85871b050f174bc0bfb437efbdb68aaf860611953ed12418e4361bc9c392749e"}, + {file = "propcache-0.3.2-cp311-cp311-win32.whl", hash = "sha256:36c8d9b673ec57900c3554264e630d45980fd302458e4ac801802a7fd2ef7897"}, + {file = "propcache-0.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:e53af8cb6a781b02d2ea079b5b853ba9430fcbe18a8e3ce647d5982a3ff69f39"}, + {file = "propcache-0.3.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:8de106b6c84506b31c27168582cd3cb3000a6412c16df14a8628e5871ff83c10"}, + {file = "propcache-0.3.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:28710b0d3975117239c76600ea351934ac7b5ff56e60953474342608dbbb6154"}, + {file = "propcache-0.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce26862344bdf836650ed2487c3d724b00fbfec4233a1013f597b78c1cb73615"}, + {file = "propcache-0.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bca54bd347a253af2cf4544bbec232ab982f4868de0dd684246b67a51bc6b1db"}, + {file = "propcache-0.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:55780d5e9a2ddc59711d727226bb1ba83a22dd32f64ee15594b9392b1f544eb1"}, + {file = "propcache-0.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:035e631be25d6975ed87ab23153db6a73426a48db688070d925aa27e996fe93c"}, + {file = "propcache-0.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ee6f22b6eaa39297c751d0e80c0d3a454f112f5c6481214fcf4c092074cecd67"}, + {file = "propcache-0.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7ca3aee1aa955438c4dba34fc20a9f390e4c79967257d830f137bd5a8a32ed3b"}, + {file = "propcache-0.3.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:7a4f30862869fa2b68380d677cc1c5fcf1e0f2b9ea0cf665812895c75d0ca3b8"}, + {file = "propcache-0.3.2-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:b77ec3c257d7816d9f3700013639db7491a434644c906a2578a11daf13176251"}, + {file = "propcache-0.3.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:cab90ac9d3f14b2d5050928483d3d3b8fb6b4018893fc75710e6aa361ecb2474"}, + {file = "propcache-0.3.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:0b504d29f3c47cf6b9e936c1852246c83d450e8e063d50562115a6be6d3a2535"}, + {file = "propcache-0.3.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:ce2ac2675a6aa41ddb2a0c9cbff53780a617ac3d43e620f8fd77ba1c84dcfc06"}, + {file = "propcache-0.3.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:62b4239611205294cc433845b914131b2a1f03500ff3c1ed093ed216b82621e1"}, + {file = "propcache-0.3.2-cp312-cp312-win32.whl", hash = "sha256:df4a81b9b53449ebc90cc4deefb052c1dd934ba85012aa912c7ea7b7e38b60c1"}, + {file = "propcache-0.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:7046e79b989d7fe457bb755844019e10f693752d169076138abf17f31380800c"}, + {file = "propcache-0.3.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ca592ed634a73ca002967458187109265e980422116c0a107cf93d81f95af945"}, + {file = "propcache-0.3.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:9ecb0aad4020e275652ba3975740f241bd12a61f1a784df044cf7477a02bc252"}, + {file = "propcache-0.3.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7f08f1cc28bd2eade7a8a3d2954ccc673bb02062e3e7da09bc75d843386b342f"}, + {file = 
"propcache-0.3.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1a342c834734edb4be5ecb1e9fb48cb64b1e2320fccbd8c54bf8da8f2a84c33"}, + {file = "propcache-0.3.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8a544caaae1ac73f1fecfae70ded3e93728831affebd017d53449e3ac052ac1e"}, + {file = "propcache-0.3.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:310d11aa44635298397db47a3ebce7db99a4cc4b9bbdfcf6c98a60c8d5261cf1"}, + {file = "propcache-0.3.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c1396592321ac83157ac03a2023aa6cc4a3cc3cfdecb71090054c09e5a7cce3"}, + {file = "propcache-0.3.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8cabf5b5902272565e78197edb682017d21cf3b550ba0460ee473753f28d23c1"}, + {file = "propcache-0.3.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0a2f2235ac46a7aa25bdeb03a9e7060f6ecbd213b1f9101c43b3090ffb971ef6"}, + {file = "propcache-0.3.2-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:92b69e12e34869a6970fd2f3da91669899994b47c98f5d430b781c26f1d9f387"}, + {file = "propcache-0.3.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:54e02207c79968ebbdffc169591009f4474dde3b4679e16634d34c9363ff56b4"}, + {file = "propcache-0.3.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4adfb44cb588001f68c5466579d3f1157ca07f7504fc91ec87862e2b8e556b88"}, + {file = "propcache-0.3.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:fd3e6019dc1261cd0291ee8919dd91fbab7b169bb76aeef6c716833a3f65d206"}, + {file = "propcache-0.3.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4c181cad81158d71c41a2bce88edce078458e2dd5ffee7eddd6b05da85079f43"}, + {file = "propcache-0.3.2-cp313-cp313-win32.whl", hash = "sha256:8a08154613f2249519e549de2330cf8e2071c2887309a7b07fb56098f5170a02"}, + {file = "propcache-0.3.2-cp313-cp313-win_amd64.whl", hash = "sha256:e41671f1594fc4ab0a6dec1351864713cb3a279910ae8b58f884a88a0a632c05"}, + {file = "propcache-0.3.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:9a3cf035bbaf035f109987d9d55dc90e4b0e36e04bbbb95af3055ef17194057b"}, + {file = "propcache-0.3.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:156c03d07dc1323d8dacaa221fbe028c5c70d16709cdd63502778e6c3ccca1b0"}, + {file = "propcache-0.3.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:74413c0ba02ba86f55cf60d18daab219f7e531620c15f1e23d95563f505efe7e"}, + {file = "propcache-0.3.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f066b437bb3fa39c58ff97ab2ca351db465157d68ed0440abecb21715eb24b28"}, + {file = "propcache-0.3.2-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1304b085c83067914721e7e9d9917d41ad87696bf70f0bc7dee450e9c71ad0a"}, + {file = "propcache-0.3.2-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ab50cef01b372763a13333b4e54021bdcb291fc9a8e2ccb9c2df98be51bcde6c"}, + {file = "propcache-0.3.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fad3b2a085ec259ad2c2842666b2a0a49dea8463579c606426128925af1ed725"}, + {file = "propcache-0.3.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:261fa020c1c14deafd54c76b014956e2f86991af198c51139faf41c4d5e83892"}, + {file = "propcache-0.3.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:46d7f8aa79c927e5f987ee3a80205c987717d3659f035c85cf0c3680526bdb44"}, + {file = 
"propcache-0.3.2-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:6d8f3f0eebf73e3c0ff0e7853f68be638b4043c65a70517bb575eff54edd8dbe"}, + {file = "propcache-0.3.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:03c89c1b14a5452cf15403e291c0ccd7751d5b9736ecb2c5bab977ad6c5bcd81"}, + {file = "propcache-0.3.2-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:0cc17efde71e12bbaad086d679ce575268d70bc123a5a71ea7ad76f70ba30bba"}, + {file = "propcache-0.3.2-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:acdf05d00696bc0447e278bb53cb04ca72354e562cf88ea6f9107df8e7fd9770"}, + {file = "propcache-0.3.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:4445542398bd0b5d32df908031cb1b30d43ac848e20470a878b770ec2dcc6330"}, + {file = "propcache-0.3.2-cp313-cp313t-win32.whl", hash = "sha256:f86e5d7cd03afb3a1db8e9f9f6eff15794e79e791350ac48a8c924e6f439f394"}, + {file = "propcache-0.3.2-cp313-cp313t-win_amd64.whl", hash = "sha256:9704bedf6e7cbe3c65eca4379a9b53ee6a83749f047808cbb5044d40d7d72198"}, + {file = "propcache-0.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:a7fad897f14d92086d6b03fdd2eb844777b0c4d7ec5e3bac0fbae2ab0602bbe5"}, + {file = "propcache-0.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1f43837d4ca000243fd7fd6301947d7cb93360d03cd08369969450cc6b2ce3b4"}, + {file = "propcache-0.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:261df2e9474a5949c46e962065d88eb9b96ce0f2bd30e9d3136bcde84befd8f2"}, + {file = "propcache-0.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e514326b79e51f0a177daab1052bc164d9d9e54133797a3a58d24c9c87a3fe6d"}, + {file = "propcache-0.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d4a996adb6904f85894570301939afeee65f072b4fd265ed7e569e8d9058e4ec"}, + {file = "propcache-0.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:76cace5d6b2a54e55b137669b30f31aa15977eeed390c7cbfb1dafa8dfe9a701"}, + {file = "propcache-0.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:31248e44b81d59d6addbb182c4720f90b44e1efdc19f58112a3c3a1615fb47ef"}, + {file = "propcache-0.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:abb7fa19dbf88d3857363e0493b999b8011eea856b846305d8c0512dfdf8fbb1"}, + {file = "propcache-0.3.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:d81ac3ae39d38588ad0549e321e6f773a4e7cc68e7751524a22885d5bbadf886"}, + {file = "propcache-0.3.2-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:cc2782eb0f7a16462285b6f8394bbbd0e1ee5f928034e941ffc444012224171b"}, + {file = "propcache-0.3.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:db429c19a6c7e8a1c320e6a13c99799450f411b02251fb1b75e6217cf4a14fcb"}, + {file = "propcache-0.3.2-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:21d8759141a9e00a681d35a1f160892a36fb6caa715ba0b832f7747da48fb6ea"}, + {file = "propcache-0.3.2-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:2ca6d378f09adb13837614ad2754fa8afaee330254f404299611bce41a8438cb"}, + {file = "propcache-0.3.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:34a624af06c048946709f4278b4176470073deda88d91342665d95f7c6270fbe"}, + {file = "propcache-0.3.2-cp39-cp39-win32.whl", hash = "sha256:4ba3fef1c30f306b1c274ce0b8baaa2c3cdd91f645c48f06394068f37d3837a1"}, + {file = "propcache-0.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:7a2368eed65fc69a7a7a40b27f22e85e7627b74216f0846b04ba5c116e191ec9"}, + {file = "propcache-0.3.2-py3-none-any.whl", hash = 
"sha256:98f1ec44fb675f5052cccc8e609c46ed23a35a1cfd18545ad4e29002d858a43f"}, + {file = "propcache-0.3.2.tar.gz", hash = "sha256:20d7d62e4e7ef05f221e0db2856b979540686342e7dd9973b815599c7057e168"}, +] + +[[package]] +name = "protobuf" +version = "4.25.8" +description = "" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "protobuf-4.25.8-cp310-abi3-win32.whl", hash = "sha256:504435d831565f7cfac9f0714440028907f1975e4bed228e58e72ecfff58a1e0"}, + {file = "protobuf-4.25.8-cp310-abi3-win_amd64.whl", hash = "sha256:bd551eb1fe1d7e92c1af1d75bdfa572eff1ab0e5bf1736716814cdccdb2360f9"}, + {file = "protobuf-4.25.8-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:ca809b42f4444f144f2115c4c1a747b9a404d590f18f37e9402422033e464e0f"}, + {file = "protobuf-4.25.8-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:9ad7ef62d92baf5a8654fbb88dac7fa5594cfa70fd3440488a5ca3bfc6d795a7"}, + {file = "protobuf-4.25.8-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:83e6e54e93d2b696a92cad6e6efc924f3850f82b52e1563778dfab8b355101b0"}, + {file = "protobuf-4.25.8-cp38-cp38-win32.whl", hash = "sha256:27d498ffd1f21fb81d987a041c32d07857d1d107909f5134ba3350e1ce80a4af"}, + {file = "protobuf-4.25.8-cp38-cp38-win_amd64.whl", hash = "sha256:d552c53d0415449c8d17ced5c341caba0d89dbf433698e1436c8fa0aae7808a3"}, + {file = "protobuf-4.25.8-cp39-cp39-win32.whl", hash = "sha256:077ff8badf2acf8bc474406706ad890466274191a48d0abd3bd6987107c9cde5"}, + {file = "protobuf-4.25.8-cp39-cp39-win_amd64.whl", hash = "sha256:f4510b93a3bec6eba8fd8f1093e9d7fb0d4a24d1a81377c10c0e5bbfe9e4ed24"}, + {file = "protobuf-4.25.8-py3-none-any.whl", hash = "sha256:15a0af558aa3b13efef102ae6e4f3efac06f1eea11afb3a57db2901447d9fb59"}, + {file = "protobuf-4.25.8.tar.gz", hash = "sha256:6135cf8affe1fc6f76cced2641e4ea8d3e59518d1f24ae41ba97bcad82d397cd"}, +] + +[[package]] +name = "pyarrow" +version = "17.0.0" +description = "Python library for Apache Arrow" +optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "python_version < \"3.10\"" +files = [ + {file = "pyarrow-17.0.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:a5c8b238d47e48812ee577ee20c9a2779e6a5904f1708ae240f53ecbee7c9f07"}, + {file = "pyarrow-17.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:db023dc4c6cae1015de9e198d41250688383c3f9af8f565370ab2b4cb5f62655"}, + {file = "pyarrow-17.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da1e060b3876faa11cee287839f9cc7cdc00649f475714b8680a05fd9071d545"}, + {file = "pyarrow-17.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75c06d4624c0ad6674364bb46ef38c3132768139ddec1c56582dbac54f2663e2"}, + {file = "pyarrow-17.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:fa3c246cc58cb5a4a5cb407a18f193354ea47dd0648194e6265bd24177982fe8"}, + {file = "pyarrow-17.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:f7ae2de664e0b158d1607699a16a488de3d008ba99b3a7aa5de1cbc13574d047"}, + {file = "pyarrow-17.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:5984f416552eea15fd9cee03da53542bf4cddaef5afecefb9aa8d1010c335087"}, + {file = "pyarrow-17.0.0-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:1c8856e2ef09eb87ecf937104aacfa0708f22dfeb039c363ec99735190ffb977"}, + {file = "pyarrow-17.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2e19f569567efcbbd42084e87f948778eb371d308e137a0f97afe19bb860ccb3"}, + {file = "pyarrow-17.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:6b244dc8e08a23b3e352899a006a26ae7b4d0da7bb636872fa8f5884e70acf15"}, + {file = "pyarrow-17.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b72e87fe3e1db343995562f7fff8aee354b55ee83d13afba65400c178ab2597"}, + {file = "pyarrow-17.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:dc5c31c37409dfbc5d014047817cb4ccd8c1ea25d19576acf1a001fe07f5b420"}, + {file = "pyarrow-17.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:e3343cb1e88bc2ea605986d4b94948716edc7a8d14afd4e2c097232f729758b4"}, + {file = "pyarrow-17.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:a27532c38f3de9eb3e90ecab63dfda948a8ca859a66e3a47f5f42d1e403c4d03"}, + {file = "pyarrow-17.0.0-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:9b8a823cea605221e61f34859dcc03207e52e409ccf6354634143e23af7c8d22"}, + {file = "pyarrow-17.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f1e70de6cb5790a50b01d2b686d54aaf73da01266850b05e3af2a1bc89e16053"}, + {file = "pyarrow-17.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0071ce35788c6f9077ff9ecba4858108eebe2ea5a3f7cf2cf55ebc1dbc6ee24a"}, + {file = "pyarrow-17.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:757074882f844411fcca735e39aae74248a1531367a7c80799b4266390ae51cc"}, + {file = "pyarrow-17.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:9ba11c4f16976e89146781a83833df7f82077cdab7dc6232c897789343f7891a"}, + {file = "pyarrow-17.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:b0c6ac301093b42d34410b187bba560b17c0330f64907bfa4f7f7f2444b0cf9b"}, + {file = "pyarrow-17.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:392bc9feabc647338e6c89267635e111d71edad5fcffba204425a7c8d13610d7"}, + {file = "pyarrow-17.0.0-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:af5ff82a04b2171415f1410cff7ebb79861afc5dae50be73ce06d6e870615204"}, + {file = "pyarrow-17.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:edca18eaca89cd6382dfbcff3dd2d87633433043650c07375d095cd3517561d8"}, + {file = "pyarrow-17.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7c7916bff914ac5d4a8fe25b7a25e432ff921e72f6f2b7547d1e325c1ad9d155"}, + {file = "pyarrow-17.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f553ca691b9e94b202ff741bdd40f6ccb70cdd5fbf65c187af132f1317de6145"}, + {file = "pyarrow-17.0.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:0cdb0e627c86c373205a2f94a510ac4376fdc523f8bb36beab2e7f204416163c"}, + {file = "pyarrow-17.0.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:d7d192305d9d8bc9082d10f361fc70a73590a4c65cf31c3e6926cd72b76bc35c"}, + {file = "pyarrow-17.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:02dae06ce212d8b3244dd3e7d12d9c4d3046945a5933d28026598e9dbbda1fca"}, + {file = "pyarrow-17.0.0-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:13d7a460b412f31e4c0efa1148e1d29bdf18ad1411eb6757d38f8fbdcc8645fb"}, + {file = "pyarrow-17.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9b564a51fbccfab5a04a80453e5ac6c9954a9c5ef2890d1bcf63741909c3f8df"}, + {file = "pyarrow-17.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:32503827abbc5aadedfa235f5ece8c4f8f8b0a3cf01066bc8d29de7539532687"}, + {file = "pyarrow-17.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a155acc7f154b9ffcc85497509bcd0d43efb80d6f733b0dc3bb14e281f131c8b"}, + {file = "pyarrow-17.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = 
"sha256:dec8d129254d0188a49f8a1fc99e0560dc1b85f60af729f47de4046015f9b0a5"}, + {file = "pyarrow-17.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:a48ddf5c3c6a6c505904545c25a4ae13646ae1f8ba703c4df4a1bfe4f4006bda"}, + {file = "pyarrow-17.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:42bf93249a083aca230ba7e2786c5f673507fa97bbd9725a1e2754715151a204"}, + {file = "pyarrow-17.0.0.tar.gz", hash = "sha256:4beca9521ed2c0921c1023e68d097d0299b62c362639ea315572a58f3f50fd28"}, +] + +[package.dependencies] +numpy = ">=1.16.6" + +[package.extras] +test = ["cffi", "hypothesis", "pandas", "pytest", "pytz"] + +[[package]] +name = "pyarrow" +version = "20.0.0" +description = "Python library for Apache Arrow" +optional = false +python-versions = ">=3.9" +groups = ["main"] +markers = "python_version >= \"3.10\"" +files = [ + {file = "pyarrow-20.0.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:c7dd06fd7d7b410ca5dc839cc9d485d2bc4ae5240851bcd45d85105cc90a47d7"}, + {file = "pyarrow-20.0.0-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:d5382de8dc34c943249b01c19110783d0d64b207167c728461add1ecc2db88e4"}, + {file = "pyarrow-20.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6415a0d0174487456ddc9beaead703d0ded5966129fa4fd3114d76b5d1c5ceae"}, + {file = "pyarrow-20.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15aa1b3b2587e74328a730457068dc6c89e6dcbf438d4369f572af9d320a25ee"}, + {file = "pyarrow-20.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:5605919fbe67a7948c1f03b9f3727d82846c053cd2ce9303ace791855923fd20"}, + {file = "pyarrow-20.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:a5704f29a74b81673d266e5ec1fe376f060627c2e42c5c7651288ed4b0db29e9"}, + {file = "pyarrow-20.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:00138f79ee1b5aca81e2bdedb91e3739b987245e11fa3c826f9e57c5d102fb75"}, + {file = "pyarrow-20.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f2d67ac28f57a362f1a2c1e6fa98bfe2f03230f7e15927aecd067433b1e70ce8"}, + {file = "pyarrow-20.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:4a8b029a07956b8d7bd742ffca25374dd3f634b35e46cc7a7c3fa4c75b297191"}, + {file = "pyarrow-20.0.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:24ca380585444cb2a31324c546a9a56abbe87e26069189e14bdba19c86c049f0"}, + {file = "pyarrow-20.0.0-cp311-cp311-macosx_12_0_x86_64.whl", hash = "sha256:95b330059ddfdc591a3225f2d272123be26c8fa76e8c9ee1a77aad507361cfdb"}, + {file = "pyarrow-20.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f0fb1041267e9968c6d0d2ce3ff92e3928b243e2b6d11eeb84d9ac547308232"}, + {file = "pyarrow-20.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8ff87cc837601532cc8242d2f7e09b4e02404de1b797aee747dd4ba4bd6313f"}, + {file = "pyarrow-20.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:7a3a5dcf54286e6141d5114522cf31dd67a9e7c9133d150799f30ee302a7a1ab"}, + {file = "pyarrow-20.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:a6ad3e7758ecf559900261a4df985662df54fb7fdb55e8e3b3aa99b23d526b62"}, + {file = "pyarrow-20.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6bb830757103a6cb300a04610e08d9636f0cd223d32f388418ea893a3e655f1c"}, + {file = "pyarrow-20.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:96e37f0766ecb4514a899d9a3554fadda770fb57ddf42b63d80f14bc20aa7db3"}, + {file = "pyarrow-20.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:3346babb516f4b6fd790da99b98bed9708e3f02e734c84971faccb20736848dc"}, + 
{file = "pyarrow-20.0.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:75a51a5b0eef32727a247707d4755322cb970be7e935172b6a3a9f9ae98404ba"}, + {file = "pyarrow-20.0.0-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:211d5e84cecc640c7a3ab900f930aaff5cd2702177e0d562d426fb7c4f737781"}, + {file = "pyarrow-20.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ba3cf4182828be7a896cbd232aa8dd6a31bd1f9e32776cc3796c012855e1199"}, + {file = "pyarrow-20.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c3a01f313ffe27ac4126f4c2e5ea0f36a5fc6ab51f8726cf41fee4b256680bd"}, + {file = "pyarrow-20.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:a2791f69ad72addd33510fec7bb14ee06c2a448e06b649e264c094c5b5f7ce28"}, + {file = "pyarrow-20.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:4250e28a22302ce8692d3a0e8ec9d9dde54ec00d237cff4dfa9c1fbf79e472a8"}, + {file = "pyarrow-20.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:89e030dc58fc760e4010148e6ff164d2f44441490280ef1e97a542375e41058e"}, + {file = "pyarrow-20.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:6102b4864d77102dbbb72965618e204e550135a940c2534711d5ffa787df2a5a"}, + {file = "pyarrow-20.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:96d6a0a37d9c98be08f5ed6a10831d88d52cac7b13f5287f1e0f625a0de8062b"}, + {file = "pyarrow-20.0.0-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:a15532e77b94c61efadde86d10957950392999503b3616b2ffcef7621a002893"}, + {file = "pyarrow-20.0.0-cp313-cp313-macosx_12_0_x86_64.whl", hash = "sha256:dd43f58037443af715f34f1322c782ec463a3c8a94a85fdb2d987ceb5658e061"}, + {file = "pyarrow-20.0.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa0d288143a8585806e3cc7c39566407aab646fb9ece164609dac1cfff45f6ae"}, + {file = "pyarrow-20.0.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b6953f0114f8d6f3d905d98e987d0924dabce59c3cda380bdfaa25a6201563b4"}, + {file = "pyarrow-20.0.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:991f85b48a8a5e839b2128590ce07611fae48a904cae6cab1f089c5955b57eb5"}, + {file = "pyarrow-20.0.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:97c8dc984ed09cb07d618d57d8d4b67a5100a30c3818c2fb0b04599f0da2de7b"}, + {file = "pyarrow-20.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:9b71daf534f4745818f96c214dbc1e6124d7daf059167330b610fc69b6f3d3e3"}, + {file = "pyarrow-20.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e8b88758f9303fa5a83d6c90e176714b2fd3852e776fc2d7e42a22dd6c2fb368"}, + {file = "pyarrow-20.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:30b3051b7975801c1e1d387e17c588d8ab05ced9b1e14eec57915f79869b5031"}, + {file = "pyarrow-20.0.0-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:ca151afa4f9b7bc45bcc791eb9a89e90a9eb2772767d0b1e5389609c7d03db63"}, + {file = "pyarrow-20.0.0-cp313-cp313t-macosx_12_0_x86_64.whl", hash = "sha256:4680f01ecd86e0dd63e39eb5cd59ef9ff24a9d166db328679e36c108dc993d4c"}, + {file = "pyarrow-20.0.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f4c8534e2ff059765647aa69b75d6543f9fef59e2cd4c6d18015192565d2b70"}, + {file = "pyarrow-20.0.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3e1f8a47f4b4ae4c69c4d702cfbdfe4d41e18e5c7ef6f1bb1c50918c1e81c57b"}, + {file = "pyarrow-20.0.0-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:a1f60dc14658efaa927f8214734f6a01a806d7690be4b3232ba526836d216122"}, + {file = 
"pyarrow-20.0.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:204a846dca751428991346976b914d6d2a82ae5b8316a6ed99789ebf976551e6"}, + {file = "pyarrow-20.0.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:f3b117b922af5e4c6b9a9115825726cac7d8b1421c37c2b5e24fbacc8930612c"}, + {file = "pyarrow-20.0.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:e724a3fd23ae5b9c010e7be857f4405ed5e679db5c93e66204db1a69f733936a"}, + {file = "pyarrow-20.0.0-cp313-cp313t-win_amd64.whl", hash = "sha256:82f1ee5133bd8f49d31be1299dc07f585136679666b502540db854968576faf9"}, + {file = "pyarrow-20.0.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:1bcbe471ef3349be7714261dea28fe280db574f9d0f77eeccc195a2d161fd861"}, + {file = "pyarrow-20.0.0-cp39-cp39-macosx_12_0_x86_64.whl", hash = "sha256:a18a14baef7d7ae49247e75641fd8bcbb39f44ed49a9fc4ec2f65d5031aa3b96"}, + {file = "pyarrow-20.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb497649e505dc36542d0e68eca1a3c94ecbe9799cb67b578b55f2441a247fbc"}, + {file = "pyarrow-20.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11529a2283cb1f6271d7c23e4a8f9f8b7fd173f7360776b668e509d712a02eec"}, + {file = "pyarrow-20.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:6fc1499ed3b4b57ee4e090e1cea6eb3584793fe3d1b4297bbf53f09b434991a5"}, + {file = "pyarrow-20.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:db53390eaf8a4dab4dbd6d93c85c5cf002db24902dbff0ca7d988beb5c9dd15b"}, + {file = "pyarrow-20.0.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:851c6a8260ad387caf82d2bbf54759130534723e37083111d4ed481cb253cc0d"}, + {file = "pyarrow-20.0.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:e22f80b97a271f0a7d9cd07394a7d348f80d3ac63ed7cc38b6d1b696ab3b2619"}, + {file = "pyarrow-20.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:9965a050048ab02409fb7cbbefeedba04d3d67f2cc899eff505cc084345959ca"}, + {file = "pyarrow-20.0.0.tar.gz", hash = "sha256:febc4a913592573c8d5805091a6c2b5064c8bd6e002131f01061797d91c783c1"}, +] + +[package.extras] +test = ["cffi", "hypothesis", "pandas", "pytest", "pytz"] + +[[package]] +name = "pyarrow-hotfix" +version = "0.7" +description = "" +optional = false +python-versions = ">=3.5" +groups = ["main"] +files = [ + {file = "pyarrow_hotfix-0.7-py3-none-any.whl", hash = "sha256:3236f3b5f1260f0e2ac070a55c1a7b339c4bb7267839bd2015e283234e758100"}, + {file = "pyarrow_hotfix-0.7.tar.gz", hash = "sha256:59399cd58bdd978b2e42816a4183a55c6472d4e33d183351b6069f11ed42661d"}, +] + +[[package]] +name = "pyasn1" +version = "0.6.1" +description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629"}, + {file = "pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034"}, +] + +[[package]] +name = "pyasn1-modules" +version = "0.4.2" +description = "A collection of ASN.1-based protocols modules" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "pyasn1_modules-0.4.2-py3-none-any.whl", hash = "sha256:29253a9207ce32b64c3ac6600edc75368f98473906e8fd1043bd6b5b1de2c14a"}, + {file = "pyasn1_modules-0.4.2.tar.gz", hash = "sha256:677091de870a80aae844b1ca6134f54652fa2c8c5a52aa396440ac3106e941e6"}, +] + +[package.dependencies] +pyasn1 = ">=0.6.1,<0.7.0" + +[[package]] +name = "pycparser" +version = 
"2.22" +description = "C parser in Python" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc"}, + {file = "pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6"}, +] + +[[package]] +name = "pycryptodome" +version = "3.23.0" +description = "Cryptographic library for Python" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +groups = ["main"] +files = [ + {file = "pycryptodome-3.23.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a176b79c49af27d7f6c12e4b178b0824626f40a7b9fed08f712291b6d54bf566"}, + {file = "pycryptodome-3.23.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:573a0b3017e06f2cffd27d92ef22e46aa3be87a2d317a5abf7cc0e84e321bd75"}, + {file = "pycryptodome-3.23.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:63dad881b99ca653302b2c7191998dd677226222a3f2ea79999aa51ce695f720"}, + {file = "pycryptodome-3.23.0-cp27-cp27m-win32.whl", hash = "sha256:b34e8e11d97889df57166eda1e1ddd7676da5fcd4d71a0062a760e75060514b4"}, + {file = "pycryptodome-3.23.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:7ac1080a8da569bde76c0a104589c4f414b8ba296c0b3738cf39a466a9fb1818"}, + {file = "pycryptodome-3.23.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:6fe8258e2039eceb74dfec66b3672552b6b7d2c235b2dfecc05d16b8921649a8"}, + {file = "pycryptodome-3.23.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:0011f7f00cdb74879142011f95133274741778abba114ceca229adbf8e62c3e4"}, + {file = "pycryptodome-3.23.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:90460fc9e088ce095f9ee8356722d4f10f86e5be06e2354230a9880b9c549aae"}, + {file = "pycryptodome-3.23.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4764e64b269fc83b00f682c47443c2e6e85b18273712b98aa43bcb77f8570477"}, + {file = "pycryptodome-3.23.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb8f24adb74984aa0e5d07a2368ad95276cf38051fe2dc6605cbcf482e04f2a7"}, + {file = "pycryptodome-3.23.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d97618c9c6684a97ef7637ba43bdf6663a2e2e77efe0f863cce97a76af396446"}, + {file = "pycryptodome-3.23.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:9a53a4fe5cb075075d515797d6ce2f56772ea7e6a1e5e4b96cf78a14bac3d265"}, + {file = "pycryptodome-3.23.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:763d1d74f56f031788e5d307029caef067febf890cd1f8bf61183ae142f1a77b"}, + {file = "pycryptodome-3.23.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:954af0e2bd7cea83ce72243b14e4fb518b18f0c1649b576d114973e2073b273d"}, + {file = "pycryptodome-3.23.0-cp313-cp313t-win32.whl", hash = "sha256:257bb3572c63ad8ba40b89f6fc9d63a2a628e9f9708d31ee26560925ebe0210a"}, + {file = "pycryptodome-3.23.0-cp313-cp313t-win_amd64.whl", hash = "sha256:6501790c5b62a29fcb227bd6b62012181d886a767ce9ed03b303d1f22eb5c625"}, + {file = "pycryptodome-3.23.0-cp313-cp313t-win_arm64.whl", hash = "sha256:9a77627a330ab23ca43b48b130e202582e91cc69619947840ea4d2d1be21eb39"}, + {file = "pycryptodome-3.23.0-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:187058ab80b3281b1de11c2e6842a357a1f71b42cb1e15bce373f3d238135c27"}, + {file = "pycryptodome-3.23.0-cp37-abi3-macosx_10_9_x86_64.whl", hash = 
"sha256:cfb5cd445280c5b0a4e6187a7ce8de5a07b5f3f897f235caa11f1f435f182843"}, + {file = "pycryptodome-3.23.0-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:67bd81fcbe34f43ad9422ee8fd4843c8e7198dd88dd3d40e6de42ee65fbe1490"}, + {file = "pycryptodome-3.23.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c8987bd3307a39bc03df5c8e0e3d8be0c4c3518b7f044b0f4c15d1aa78f52575"}, + {file = "pycryptodome-3.23.0-cp37-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aa0698f65e5b570426fc31b8162ed4603b0c2841cbb9088e2b01641e3065915b"}, + {file = "pycryptodome-3.23.0-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:53ecbafc2b55353edcebd64bf5da94a2a2cdf5090a6915bcca6eca6cc452585a"}, + {file = "pycryptodome-3.23.0-cp37-abi3-musllinux_1_2_i686.whl", hash = "sha256:156df9667ad9f2ad26255926524e1c136d6664b741547deb0a86a9acf5ea631f"}, + {file = "pycryptodome-3.23.0-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:dea827b4d55ee390dc89b2afe5927d4308a8b538ae91d9c6f7a5090f397af1aa"}, + {file = "pycryptodome-3.23.0-cp37-abi3-win32.whl", hash = "sha256:507dbead45474b62b2bbe318eb1c4c8ee641077532067fec9c1aa82c31f84886"}, + {file = "pycryptodome-3.23.0-cp37-abi3-win_amd64.whl", hash = "sha256:c75b52aacc6c0c260f204cbdd834f76edc9fb0d8e0da9fbf8352ef58202564e2"}, + {file = "pycryptodome-3.23.0-cp37-abi3-win_arm64.whl", hash = "sha256:11eeeb6917903876f134b56ba11abe95c0b0fd5e3330def218083c7d98bbcb3c"}, + {file = "pycryptodome-3.23.0-pp27-pypy_73-manylinux2010_x86_64.whl", hash = "sha256:350ebc1eba1da729b35ab7627a833a1a355ee4e852d8ba0447fafe7b14504d56"}, + {file = "pycryptodome-3.23.0-pp27-pypy_73-win32.whl", hash = "sha256:93837e379a3e5fd2bb00302a47aee9fdf7940d83595be3915752c74033d17ca7"}, + {file = "pycryptodome-3.23.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:ddb95b49df036ddd264a0ad246d1be5b672000f12d6961ea2c267083a5e19379"}, + {file = "pycryptodome-3.23.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e95564beb8782abfd9e431c974e14563a794a4944c29d6d3b7b5ea042110b4"}, + {file = "pycryptodome-3.23.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14e15c081e912c4b0d75632acd8382dfce45b258667aa3c67caf7a4d4c13f630"}, + {file = "pycryptodome-3.23.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a7fc76bf273353dc7e5207d172b83f569540fc9a28d63171061c42e361d22353"}, + {file = "pycryptodome-3.23.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:45c69ad715ca1a94f778215a11e66b7ff989d792a4d63b68dc586a1da1392ff5"}, + {file = "pycryptodome-3.23.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:865d83c906b0fc6a59b510deceee656b6bc1c4fa0d82176e2b77e97a420a996a"}, + {file = "pycryptodome-3.23.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89d4d56153efc4d81defe8b65fd0821ef8b2d5ddf8ed19df31ba2f00872b8002"}, + {file = "pycryptodome-3.23.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e3f2d0aaf8080bda0587d58fc9fe4766e012441e2eed4269a77de6aea981c8be"}, + {file = "pycryptodome-3.23.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:64093fc334c1eccfd3933c134c4457c34eaca235eeae49d69449dc4728079339"}, + {file = "pycryptodome-3.23.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:ce64e84a962b63a47a592690bdc16a7eaf709d2c2697ababf24a0def566899a6"}, + 
{file = "pycryptodome-3.23.0.tar.gz", hash = "sha256:447700a657182d60338bab09fdb27518f8856aecd80ae4c6bdddb67ff5da44ef"}, +] + +[[package]] +name = "pydantic" +version = "2.10.6" +description = "Data validation using Python type hints" +optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "python_version < \"3.10\"" +files = [ + {file = "pydantic-2.10.6-py3-none-any.whl", hash = "sha256:427d664bf0b8a2b34ff5dd0f5a18df00591adcee7198fbd71981054cef37b584"}, + {file = "pydantic-2.10.6.tar.gz", hash = "sha256:ca5daa827cce33de7a42be142548b0096bf05a7e7b365aebfa5f8eeec7128236"}, +] + +[package.dependencies] +annotated-types = ">=0.6.0" +pydantic-core = "2.27.2" +typing-extensions = ">=4.12.2" + +[package.extras] +email = ["email-validator (>=2.0.0)"] +timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows\""] + +[[package]] +name = "pydantic" +version = "2.11.7" +description = "Data validation using Python type hints" +optional = false +python-versions = ">=3.9" +groups = ["main"] +markers = "python_version >= \"3.10\"" +files = [ + {file = "pydantic-2.11.7-py3-none-any.whl", hash = "sha256:dde5df002701f6de26248661f6835bbe296a47bf73990135c7d07ce741b9623b"}, + {file = "pydantic-2.11.7.tar.gz", hash = "sha256:d989c3c6cb79469287b1569f7447a17848c998458d49ebe294e975b9baf0f0db"}, +] + +[package.dependencies] +annotated-types = ">=0.6.0" +pydantic-core = "2.33.2" +typing-extensions = ">=4.12.2" +typing-inspection = ">=0.4.0" + +[package.extras] +email = ["email-validator (>=2.0.0)"] +timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows\""] + +[[package]] +name = "pydantic-core" +version = "2.27.2" +description = "Core functionality for Pydantic validation and serialization" +optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "python_version < \"3.10\"" +files = [ + {file = "pydantic_core-2.27.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2d367ca20b2f14095a8f4fa1210f5a7b78b8a20009ecced6b12818f455b1e9fa"}, + {file = "pydantic_core-2.27.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:491a2b73db93fab69731eaee494f320faa4e093dbed776be1a829c2eb222c34c"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7969e133a6f183be60e9f6f56bfae753585680f3b7307a8e555a948d443cc05a"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3de9961f2a346257caf0aa508a4da705467f53778e9ef6fe744c038119737ef5"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e2bb4d3e5873c37bb3dd58714d4cd0b0e6238cebc4177ac8fe878f8b3aa8e74c"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:280d219beebb0752699480fe8f1dc61ab6615c2046d76b7ab7ee38858de0a4e7"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47956ae78b6422cbd46f772f1746799cbb862de838fd8d1fbd34a82e05b0983a"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:14d4a5c49d2f009d62a2a7140d3064f686d17a5d1a268bc641954ba181880236"}, + {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:337b443af21d488716f8d0b6164de833e788aa6bd7e3a39c005febc1284f4962"}, + {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:03d0f86ea3184a12f41a2d23f7ccb79cdb5a18e06993f8a45baa8dfec746f0e9"}, + {file = 
"pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7041c36f5680c6e0f08d922aed302e98b3745d97fe1589db0a3eebf6624523af"}, + {file = "pydantic_core-2.27.2-cp310-cp310-win32.whl", hash = "sha256:50a68f3e3819077be2c98110c1f9dcb3817e93f267ba80a2c05bb4f8799e2ff4"}, + {file = "pydantic_core-2.27.2-cp310-cp310-win_amd64.whl", hash = "sha256:e0fd26b16394ead34a424eecf8a31a1f5137094cabe84a1bcb10fa6ba39d3d31"}, + {file = "pydantic_core-2.27.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:8e10c99ef58cfdf2a66fc15d66b16c4a04f62bca39db589ae8cba08bc55331bc"}, + {file = "pydantic_core-2.27.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:26f32e0adf166a84d0cb63be85c562ca8a6fa8de28e5f0d92250c6b7e9e2aff7"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c19d1ea0673cd13cc2f872f6c9ab42acc4e4f492a7ca9d3795ce2b112dd7e15"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5e68c4446fe0810e959cdff46ab0a41ce2f2c86d227d96dc3847af0ba7def306"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d9640b0059ff4f14d1f37321b94061c6db164fbe49b334b31643e0528d100d99"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:40d02e7d45c9f8af700f3452f329ead92da4c5f4317ca9b896de7ce7199ea459"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c1fd185014191700554795c99b347d64f2bb637966c4cfc16998a0ca700d048"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d81d2068e1c1228a565af076598f9e7451712700b673de8f502f0334f281387d"}, + {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1a4207639fb02ec2dbb76227d7c751a20b1a6b4bc52850568e52260cae64ca3b"}, + {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:3de3ce3c9ddc8bbd88f6e0e304dea0e66d843ec9de1b0042b0911c1663ffd474"}, + {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:30c5f68ded0c36466acede341551106821043e9afaad516adfb6e8fa80a4e6a6"}, + {file = "pydantic_core-2.27.2-cp311-cp311-win32.whl", hash = "sha256:c70c26d2c99f78b125a3459f8afe1aed4d9687c24fd677c6a4436bc042e50d6c"}, + {file = "pydantic_core-2.27.2-cp311-cp311-win_amd64.whl", hash = "sha256:08e125dbdc505fa69ca7d9c499639ab6407cfa909214d500897d02afb816e7cc"}, + {file = "pydantic_core-2.27.2-cp311-cp311-win_arm64.whl", hash = "sha256:26f0d68d4b235a2bae0c3fc585c585b4ecc51382db0e3ba402a22cbc440915e4"}, + {file = "pydantic_core-2.27.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9e0c8cfefa0ef83b4da9588448b6d8d2a2bf1a53c3f1ae5fca39eb3061e2f0b0"}, + {file = "pydantic_core-2.27.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:83097677b8e3bd7eaa6775720ec8e0405f1575015a463285a92bfdfe254529ef"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:172fce187655fece0c90d90a678424b013f8fbb0ca8b036ac266749c09438cb7"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:519f29f5213271eeeeb3093f662ba2fd512b91c5f188f3bb7b27bc5973816934"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:05e3a55d124407fffba0dd6b0c0cd056d10e983ceb4e5dbd10dda135c31071d6"}, + {file = 
"pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9c3ed807c7b91de05e63930188f19e921d1fe90de6b4f5cd43ee7fcc3525cb8c"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fb4aadc0b9a0c063206846d603b92030eb6f03069151a625667f982887153e2"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:28ccb213807e037460326424ceb8b5245acb88f32f3d2777427476e1b32c48c4"}, + {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:de3cd1899e2c279b140adde9357c4495ed9d47131b4a4eaff9052f23398076b3"}, + {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:220f892729375e2d736b97d0e51466252ad84c51857d4d15f5e9692f9ef12be4"}, + {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a0fcd29cd6b4e74fe8ddd2c90330fd8edf2e30cb52acda47f06dd615ae72da57"}, + {file = "pydantic_core-2.27.2-cp312-cp312-win32.whl", hash = "sha256:1e2cb691ed9834cd6a8be61228471d0a503731abfb42f82458ff27be7b2186fc"}, + {file = "pydantic_core-2.27.2-cp312-cp312-win_amd64.whl", hash = "sha256:cc3f1a99a4f4f9dd1de4fe0312c114e740b5ddead65bb4102884b384c15d8bc9"}, + {file = "pydantic_core-2.27.2-cp312-cp312-win_arm64.whl", hash = "sha256:3911ac9284cd8a1792d3cb26a2da18f3ca26c6908cc434a18f730dc0db7bfa3b"}, + {file = "pydantic_core-2.27.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7d14bd329640e63852364c306f4d23eb744e0f8193148d4044dd3dacdaacbd8b"}, + {file = "pydantic_core-2.27.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:82f91663004eb8ed30ff478d77c4d1179b3563df6cdb15c0817cd1cdaf34d154"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71b24c7d61131bb83df10cc7e687433609963a944ccf45190cfc21e0887b08c9"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fa8e459d4954f608fa26116118bb67f56b93b209c39b008277ace29937453dc9"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce8918cbebc8da707ba805b7fd0b382816858728ae7fe19a942080c24e5b7cd1"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eda3f5c2a021bbc5d976107bb302e0131351c2ba54343f8a496dc8783d3d3a6a"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd8086fa684c4775c27f03f062cbb9eaa6e17f064307e86b21b9e0abc9c0f02e"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8d9b3388db186ba0c099a6d20f0604a44eabdeef1777ddd94786cdae158729e4"}, + {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7a66efda2387de898c8f38c0cf7f14fca0b51a8ef0b24bfea5849f1b3c95af27"}, + {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:18a101c168e4e092ab40dbc2503bdc0f62010e95d292b27827871dc85450d7ee"}, + {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ba5dd002f88b78a4215ed2f8ddbdf85e8513382820ba15ad5ad8955ce0ca19a1"}, + {file = "pydantic_core-2.27.2-cp313-cp313-win32.whl", hash = "sha256:1ebaf1d0481914d004a573394f4be3a7616334be70261007e47c2a6fe7e50130"}, + {file = "pydantic_core-2.27.2-cp313-cp313-win_amd64.whl", hash = "sha256:953101387ecf2f5652883208769a79e48db18c6df442568a0b5ccd8c2723abee"}, + {file = "pydantic_core-2.27.2-cp313-cp313-win_arm64.whl", 
hash = "sha256:ac4dbfd1691affb8f48c2c13241a2e3b60ff23247cbcf981759c768b6633cf8b"}, + {file = "pydantic_core-2.27.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d3e8d504bdd3f10835468f29008d72fc8359d95c9c415ce6e767203db6127506"}, + {file = "pydantic_core-2.27.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:521eb9b7f036c9b6187f0b47318ab0d7ca14bd87f776240b90b21c1f4f149320"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85210c4d99a0114f5a9481b44560d7d1e35e32cc5634c656bc48e590b669b145"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d716e2e30c6f140d7560ef1538953a5cd1a87264c737643d481f2779fc247fe1"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f66d89ba397d92f840f8654756196d93804278457b5fbede59598a1f9f90b228"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:669e193c1c576a58f132e3158f9dfa9662969edb1a250c54d8fa52590045f046"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdbe7629b996647b99c01b37f11170a57ae675375b14b8c13b8518b8320ced5"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d262606bf386a5ba0b0af3b97f37c83d7011439e3dc1a9298f21efb292e42f1a"}, + {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:cabb9bcb7e0d97f74df8646f34fc76fbf793b7f6dc2438517d7a9e50eee4f14d"}, + {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_armv7l.whl", hash = "sha256:d2d63f1215638d28221f664596b1ccb3944f6e25dd18cd3b86b0a4c408d5ebb9"}, + {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bca101c00bff0adb45a833f8451b9105d9df18accb8743b08107d7ada14bd7da"}, + {file = "pydantic_core-2.27.2-cp38-cp38-win32.whl", hash = "sha256:f6f8e111843bbb0dee4cb6594cdc73e79b3329b526037ec242a3e49012495b3b"}, + {file = "pydantic_core-2.27.2-cp38-cp38-win_amd64.whl", hash = "sha256:fd1aea04935a508f62e0d0ef1f5ae968774a32afc306fb8545e06f5ff5cdf3ad"}, + {file = "pydantic_core-2.27.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:c10eb4f1659290b523af58fa7cffb452a61ad6ae5613404519aee4bfbf1df993"}, + {file = "pydantic_core-2.27.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ef592d4bad47296fb11f96cd7dc898b92e795032b4894dfb4076cfccd43a9308"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c61709a844acc6bf0b7dce7daae75195a10aac96a596ea1b776996414791ede4"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c5f762659e47fdb7b16956c71598292f60a03aa92f8b6351504359dbdba6cf"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4c9775e339e42e79ec99c441d9730fccf07414af63eac2f0e48e08fd38a64d76"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57762139821c31847cfb2df63c12f725788bd9f04bc2fb392790959b8f70f118"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d1e85068e818c73e048fe28cfc769040bb1f475524f4745a5dc621f75ac7630"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:097830ed52fd9e427942ff3b9bc17fab52913b2f50f2880dc4a5611446606a54"}, + {file = 
"pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:044a50963a614ecfae59bb1eaf7ea7efc4bc62f49ed594e18fa1e5d953c40e9f"}, + {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:4e0b4220ba5b40d727c7f879eac379b822eee5d8fff418e9d3381ee45b3b0362"}, + {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5e4f4bb20d75e9325cc9696c6802657b58bc1dbbe3022f32cc2b2b632c3fbb96"}, + {file = "pydantic_core-2.27.2-cp39-cp39-win32.whl", hash = "sha256:cca63613e90d001b9f2f9a9ceb276c308bfa2a43fafb75c8031c4f66039e8c6e"}, + {file = "pydantic_core-2.27.2-cp39-cp39-win_amd64.whl", hash = "sha256:77d1bca19b0f7021b3a982e6f903dcd5b2b06076def36a652e3907f596e29f67"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:2bf14caea37e91198329b828eae1618c068dfb8ef17bb33287a7ad4b61ac314e"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b0cb791f5b45307caae8810c2023a184c74605ec3bcbb67d13846c28ff731ff8"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:688d3fd9fcb71f41c4c015c023d12a79d1c4c0732ec9eb35d96e3388a120dcf3"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d591580c34f4d731592f0e9fe40f9cc1b430d297eecc70b962e93c5c668f15f"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:82f986faf4e644ffc189a7f1aafc86e46ef70372bb153e7001e8afccc6e54133"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:bec317a27290e2537f922639cafd54990551725fc844249e64c523301d0822fc"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:0296abcb83a797db256b773f45773da397da75a08f5fcaef41f2044adec05f50"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:0d75070718e369e452075a6017fbf187f788e17ed67a3abd47fa934d001863d9"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:7e17b560be3c98a8e3aa66ce828bdebb9e9ac6ad5466fba92eb74c4c95cb1151"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c33939a82924da9ed65dab5a65d427205a73181d8098e79b6b426bdf8ad4e656"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:00bad2484fa6bda1e216e7345a798bd37c68fb2d97558edd584942aa41b7d278"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c817e2b40aba42bac6f457498dacabc568c3b7a986fc9ba7c8d9d260b71485fb"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:251136cdad0cb722e93732cb45ca5299fb56e1344a833640bf93b2803f8d1bfd"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d2088237af596f0a524d3afc39ab3b036e8adb054ee57cbb1dcf8e09da5b29cc"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d4041c0b966a84b4ae7a09832eb691a35aec90910cd2dbe7a208de59be77965b"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:8083d4e875ebe0b864ffef72a4304827015cff328a1be6e22cc850753bfb122b"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f141ee28a0ad2123b6611b6ceff018039df17f32ada8b534e6aa039545a3efb2"}, + 
{file = "pydantic_core-2.27.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7d0c8399fcc1848491f00e0314bd59fb34a9c008761bcb422a057670c3f65e35"}, + {file = "pydantic_core-2.27.2.tar.gz", hash = "sha256:eb026e5a4c1fee05726072337ff51d1efb6f59090b7da90d30ea58625b1ffb39"}, +] + +[package.dependencies] +typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" + +[[package]] +name = "pydantic-core" +version = "2.33.2" +description = "Core functionality for Pydantic validation and serialization" +optional = false +python-versions = ">=3.9" +groups = ["main"] +markers = "python_version >= \"3.10\"" +files = [ + {file = "pydantic_core-2.33.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2b3d326aaef0c0399d9afffeb6367d5e26ddc24d351dbc9c636840ac355dc5d8"}, + {file = "pydantic_core-2.33.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e5b2671f05ba48b94cb90ce55d8bdcaaedb8ba00cc5359f6810fc918713983d"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0069c9acc3f3981b9ff4cdfaf088e98d83440a4c7ea1bc07460af3d4dc22e72d"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d53b22f2032c42eaaf025f7c40c2e3b94568ae077a606f006d206a463bc69572"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0405262705a123b7ce9f0b92f123334d67b70fd1f20a9372b907ce1080c7ba02"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4b25d91e288e2c4e0662b8038a28c6a07eaac3e196cfc4ff69de4ea3db992a1b"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bdfe4b3789761f3bcb4b1ddf33355a71079858958e3a552f16d5af19768fef2"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:efec8db3266b76ef9607c2c4c419bdb06bf335ae433b80816089ea7585816f6a"}, + {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:031c57d67ca86902726e0fae2214ce6770bbe2f710dc33063187a68744a5ecac"}, + {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:f8de619080e944347f5f20de29a975c2d815d9ddd8be9b9b7268e2e3ef68605a"}, + {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:73662edf539e72a9440129f231ed3757faab89630d291b784ca99237fb94db2b"}, + {file = "pydantic_core-2.33.2-cp310-cp310-win32.whl", hash = "sha256:0a39979dcbb70998b0e505fb1556a1d550a0781463ce84ebf915ba293ccb7e22"}, + {file = "pydantic_core-2.33.2-cp310-cp310-win_amd64.whl", hash = "sha256:b0379a2b24882fef529ec3b4987cb5d003b9cda32256024e6fe1586ac45fc640"}, + {file = "pydantic_core-2.33.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:4c5b0a576fb381edd6d27f0a85915c6daf2f8138dc5c267a57c08a62900758c7"}, + {file = "pydantic_core-2.33.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e799c050df38a639db758c617ec771fd8fb7a5f8eaaa4b27b101f266b216a246"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dc46a01bf8d62f227d5ecee74178ffc448ff4e5197c756331f71efcc66dc980f"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a144d4f717285c6d9234a66778059f33a89096dfb9b39117663fd8413d582dcc"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:73cf6373c21bc80b2e0dc88444f41ae60b2f070ed02095754eb5a01df12256de"}, + {file = 
"pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3dc625f4aa79713512d1976fe9f0bc99f706a9dee21dfd1810b4bbbf228d0e8a"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:881b21b5549499972441da4758d662aeea93f1923f953e9cbaff14b8b9565aef"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bdc25f3681f7b78572699569514036afe3c243bc3059d3942624e936ec93450e"}, + {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:fe5b32187cbc0c862ee201ad66c30cf218e5ed468ec8dc1cf49dec66e160cc4d"}, + {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:bc7aee6f634a6f4a95676fcb5d6559a2c2a390330098dba5e5a5f28a2e4ada30"}, + {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:235f45e5dbcccf6bd99f9f472858849f73d11120d76ea8707115415f8e5ebebf"}, + {file = "pydantic_core-2.33.2-cp311-cp311-win32.whl", hash = "sha256:6368900c2d3ef09b69cb0b913f9f8263b03786e5b2a387706c5afb66800efd51"}, + {file = "pydantic_core-2.33.2-cp311-cp311-win_amd64.whl", hash = "sha256:1e063337ef9e9820c77acc768546325ebe04ee38b08703244c1309cccc4f1bab"}, + {file = "pydantic_core-2.33.2-cp311-cp311-win_arm64.whl", hash = "sha256:6b99022f1d19bc32a4c2a0d544fc9a76e3be90f0b3f4af413f87d38749300e65"}, + {file = "pydantic_core-2.33.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a7ec89dc587667f22b6a0b6579c249fca9026ce7c333fc142ba42411fa243cdc"}, + {file = "pydantic_core-2.33.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3c6db6e52c6d70aa0d00d45cdb9b40f0433b96380071ea80b09277dba021ddf7"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e61206137cbc65e6d5256e1166f88331d3b6238e082d9f74613b9b765fb9025"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eb8c529b2819c37140eb51b914153063d27ed88e3bdc31b71198a198e921e011"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c52b02ad8b4e2cf14ca7b3d918f3eb0ee91e63b3167c32591e57c4317e134f8f"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:96081f1605125ba0855dfda83f6f3df5ec90c61195421ba72223de35ccfb2f88"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f57a69461af2a5fa6e6bbd7a5f60d3b7e6cebb687f55106933188e79ad155c1"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:572c7e6c8bb4774d2ac88929e3d1f12bc45714ae5ee6d9a788a9fb35e60bb04b"}, + {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:db4b41f9bd95fbe5acd76d89920336ba96f03e149097365afe1cb092fceb89a1"}, + {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:fa854f5cf7e33842a892e5c73f45327760bc7bc516339fda888c75ae60edaeb6"}, + {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5f483cfb75ff703095c59e365360cb73e00185e01aaea067cd19acffd2ab20ea"}, + {file = "pydantic_core-2.33.2-cp312-cp312-win32.whl", hash = "sha256:9cb1da0f5a471435a7bc7e439b8a728e8b61e59784b2af70d7c169f8dd8ae290"}, + {file = "pydantic_core-2.33.2-cp312-cp312-win_amd64.whl", hash = "sha256:f941635f2a3d96b2973e867144fde513665c87f13fe0e193c158ac51bfaaa7b2"}, + {file = "pydantic_core-2.33.2-cp312-cp312-win_arm64.whl", 
hash = "sha256:cca3868ddfaccfbc4bfb1d608e2ccaaebe0ae628e1416aeb9c4d88c001bb45ab"}, + {file = "pydantic_core-2.33.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:1082dd3e2d7109ad8b7da48e1d4710c8d06c253cbc4a27c1cff4fbcaa97a9e3f"}, + {file = "pydantic_core-2.33.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f517ca031dfc037a9c07e748cefd8d96235088b83b4f4ba8939105d20fa1dcd6"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a9f2c9dd19656823cb8250b0724ee9c60a82f3cdf68a080979d13092a3b0fef"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b0a451c263b01acebe51895bfb0e1cc842a5c666efe06cdf13846c7418caa9a"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ea40a64d23faa25e62a70ad163571c0b342b8bf66d5fa612ac0dec4f069d916"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fb2d542b4d66f9470e8065c5469ec676978d625a8b7a363f07d9a501a9cb36a"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdac5d6ffa1b5a83bca06ffe7583f5576555e6c8b3a91fbd25ea7780f825f7d"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:04a1a413977ab517154eebb2d326da71638271477d6ad87a769102f7c2488c56"}, + {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:c8e7af2f4e0194c22b5b37205bfb293d166a7344a5b0d0eaccebc376546d77d5"}, + {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:5c92edd15cd58b3c2d34873597a1e20f13094f59cf88068adb18947df5455b4e"}, + {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:65132b7b4a1c0beded5e057324b7e16e10910c106d43675d9bd87d4f38dde162"}, + {file = "pydantic_core-2.33.2-cp313-cp313-win32.whl", hash = "sha256:52fb90784e0a242bb96ec53f42196a17278855b0f31ac7c3cc6f5c1ec4811849"}, + {file = "pydantic_core-2.33.2-cp313-cp313-win_amd64.whl", hash = "sha256:c083a3bdd5a93dfe480f1125926afcdbf2917ae714bdb80b36d34318b2bec5d9"}, + {file = "pydantic_core-2.33.2-cp313-cp313-win_arm64.whl", hash = "sha256:e80b087132752f6b3d714f041ccf74403799d3b23a72722ea2e6ba2e892555b9"}, + {file = "pydantic_core-2.33.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:61c18fba8e5e9db3ab908620af374db0ac1baa69f0f32df4f61ae23f15e586ac"}, + {file = "pydantic_core-2.33.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95237e53bb015f67b63c91af7518a62a8660376a6a0db19b89acc77a4d6199f5"}, + {file = "pydantic_core-2.33.2-cp313-cp313t-win_amd64.whl", hash = "sha256:c2fc0a768ef76c15ab9238afa6da7f69895bb5d1ee83aeea2e3509af4472d0b9"}, + {file = "pydantic_core-2.33.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a2b911a5b90e0374d03813674bf0a5fbbb7741570dcd4b4e85a2e48d17def29d"}, + {file = "pydantic_core-2.33.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6fa6dfc3e4d1f734a34710f391ae822e0a8eb8559a85c6979e14e65ee6ba2954"}, + {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c54c939ee22dc8e2d545da79fc5381f1c020d6d3141d3bd747eab59164dc89fb"}, + {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53a57d2ed685940a504248187d5685e49eb5eef0f696853647bf37c418c538f7"}, + {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:09fb9dd6571aacd023fe6aaca316bd01cf60ab27240d7eb39ebd66a3a15293b4"}, + {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0e6116757f7959a712db11f3e9c0a99ade00a5bbedae83cb801985aa154f071b"}, + {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d55ab81c57b8ff8548c3e4947f119551253f4e3787a7bbc0b6b3ca47498a9d3"}, + {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c20c462aa4434b33a2661701b861604913f912254e441ab8d78d30485736115a"}, + {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:44857c3227d3fb5e753d5fe4a3420d6376fa594b07b621e220cd93703fe21782"}, + {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:eb9b459ca4df0e5c87deb59d37377461a538852765293f9e6ee834f0435a93b9"}, + {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9fcd347d2cc5c23b06de6d3b7b8275be558a0c90549495c699e379a80bf8379e"}, + {file = "pydantic_core-2.33.2-cp39-cp39-win32.whl", hash = "sha256:83aa99b1285bc8f038941ddf598501a86f1536789740991d7d8756e34f1e74d9"}, + {file = "pydantic_core-2.33.2-cp39-cp39-win_amd64.whl", hash = "sha256:f481959862f57f29601ccced557cc2e817bce7533ab8e01a797a48b49c9692b3"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5c4aa4e82353f65e548c476b37e64189783aa5384903bfea4f41580f255fddfa"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d946c8bf0d5c24bf4fe333af284c59a19358aa3ec18cb3dc4370080da1e8ad29"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87b31b6846e361ef83fedb187bb5b4372d0da3f7e28d85415efa92d6125d6e6d"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa9d91b338f2df0508606f7009fde642391425189bba6d8c653afd80fd6bb64e"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2058a32994f1fde4ca0480ab9d1e75a0e8c87c22b53a3ae66554f9af78f2fe8c"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:0e03262ab796d986f978f79c943fc5f620381be7287148b8010b4097f79a39ec"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:1a8695a8d00c73e50bff9dfda4d540b7dee29ff9b8053e38380426a85ef10052"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:fa754d1850735a0b0e03bcffd9d4b4343eb417e47196e4485d9cca326073a42c"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:a11c8d26a50bfab49002947d3d237abe4d9e4b5bdc8846a63537b6488e197808"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:dd14041875d09cc0f9308e37a6f8b65f5585cf2598a53aa0123df8b129d481f8"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:d87c561733f66531dced0da6e864f44ebf89a8fba55f31407b00c2f7f9449593"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f82865531efd18d6e07a04a17331af02cb7a651583c418df8266f17a63c6612"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bfb5112df54209d820d7bf9317c7a6c9025ea52e49f46b6a2060104bba37de7"}, + {file = 
"pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:64632ff9d614e5eecfb495796ad51b0ed98c453e447a76bcbeeb69615079fc7e"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:f889f7a40498cc077332c7ab6b4608d296d852182211787d4f3ee377aaae66e8"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:de4b83bb311557e439b9e186f733f6c645b9417c84e2eb8203f3f820a4b988bf"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:82f68293f055f51b51ea42fafc74b6aad03e70e191799430b90c13d643059ebb"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:329467cecfb529c925cf2bbd4d60d2c509bc2fb52a20c1045bf09bb70971a9c1"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:87acbfcf8e90ca885206e98359d7dca4bcbb35abdc0ff66672a293e1d7a19101"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:7f92c15cd1e97d4b12acd1cc9004fa092578acfa57b67ad5e43a197175d01a64"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d3f26877a748dc4251cfcfda9dfb5f13fcb034f5308388066bcfe9031b63ae7d"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dac89aea9af8cd672fa7b510e7b8c33b0bba9a43186680550ccf23020f32d535"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:970919794d126ba8645f3837ab6046fb4e72bbc057b3709144066204c19a455d"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:3eb3fe62804e8f859c49ed20a8451342de53ed764150cb14ca71357c765dc2a6"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:3abcd9392a36025e3bd55f9bd38d908bd17962cc49bc6da8e7e96285336e2bca"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:3a1c81334778f9e3af2f8aeb7a960736e5cab1dfebfb26aabca09afd2906c039"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2807668ba86cb38c6817ad9bc66215ab8584d1d304030ce4f0887336f28a5e27"}, + {file = "pydantic_core-2.33.2.tar.gz", hash = "sha256:7cb8bc3605c29176e1b105350d2e6474142d7c1bd1d9327c4a9bdb46bf827acc"}, +] + +[package.dependencies] +typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" + +[[package]] +name = "pygments" +version = "2.19.2" +description = "Pygments is a syntax highlighting package written in Python." 
+optional = false +python-versions = ">=3.8" +groups = ["main", "dev"] +files = [ + {file = "pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b"}, + {file = "pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887"}, +] +markers = {dev = "python_version >= \"3.10\""} + +[package.extras] +windows-terminal = ["colorama (>=0.4.6)"] + +[[package]] +name = "pymilvus" +version = "2.3.1" +description = "Python Sdk for Milvus" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "pymilvus-2.3.1-py3-none-any.whl", hash = "sha256:ce65e1de8700f33bd9aade20f013291629702e25b05726773208f1f0b22548ff"}, + {file = "pymilvus-2.3.1.tar.gz", hash = "sha256:d460f6204d7deb2cff93716bd65670c1b440694b77701fb0ab0ead791aa582c6"}, +] + +[package.dependencies] +environs = "<=9.5.0" +grpcio = ">=1.49.1,<=1.58.0" +minio = "*" +numpy = {version = "<1.25.0", markers = "python_version <= \"3.8\""} +pandas = ">=1.2.4" +protobuf = ">=3.20.0" +requests = "*" +ujson = ">=2.0.0" + +[[package]] +name = "pymysql" +version = "1.1.0" +description = "Pure Python MySQL Driver" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "PyMySQL-1.1.0-py3-none-any.whl", hash = "sha256:8969ec6d763c856f7073c4c64662882675702efcb114b4bcbb955aea3a069fa7"}, + {file = "PyMySQL-1.1.0.tar.gz", hash = "sha256:4f13a7df8bf36a51e81dd9f3605fede45a4878fe02f9236349fd82a3f0612f96"}, +] + +[package.extras] +ed25519 = ["PyNaCl (>=1.4.0)"] +rsa = ["cryptography"] + +[[package]] +name = "pypika" +version = "0.48.9" +description = "A SQL query builder API for Python" +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "PyPika-0.48.9.tar.gz", hash = "sha256:838836a61747e7c8380cd1b7ff638694b7a7335345d0f559b04b2cd832ad5378"}, +] + +[[package]] +name = "pyproject-hooks" +version = "1.2.0" +description = "Wrappers to call pyproject.toml-based build backend hooks." +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "pyproject_hooks-1.2.0-py3-none-any.whl", hash = "sha256:9e5c6bfa8dcc30091c74b0cf803c81fdd29d94f01992a7707bc97babb1141913"}, + {file = "pyproject_hooks-1.2.0.tar.gz", hash = "sha256:1e859bd5c40fae9448642dd871adf459e5e2084186e8d2c2a79a824c970da1f8"}, +] + +[[package]] +name = "pyreadline3" +version = "3.5.4" +description = "A python implementation of GNU readline." 
+optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "sys_platform == \"win32\"" +files = [ + {file = "pyreadline3-3.5.4-py3-none-any.whl", hash = "sha256:eaf8e6cc3c49bcccf145fc6067ba8643d1df34d604a1ec0eccbf7a18e6d3fae6"}, + {file = "pyreadline3-3.5.4.tar.gz", hash = "sha256:8d57d53039a1c75adba8e50dd3d992b28143480816187ea5efbd5c78e6c885b7"}, +] + +[package.extras] +dev = ["build", "flake8", "mypy", "pytest", "twine"] + +[[package]] +name = "pytest" +version = "8.3.5" +description = "pytest: simple powerful testing with Python" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +markers = "python_version < \"3.10\"" +files = [ + {file = "pytest-8.3.5-py3-none-any.whl", hash = "sha256:c69214aa47deac29fad6c2a4f590b9c4a9fdb16a403176fe154b79c0b4d4d820"}, + {file = "pytest-8.3.5.tar.gz", hash = "sha256:f4efe70cc14e511565ac476b57c279e12a855b11f48f212af1080ef2263d3845"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "sys_platform == \"win32\""} +exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} +iniconfig = "*" +packaging = "*" +pluggy = ">=1.5,<2" +tomli = {version = ">=1", markers = "python_version < \"3.11\""} + +[package.extras] +dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] + +[[package]] +name = "pytest" +version = "8.4.1" +description = "pytest: simple powerful testing with Python" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +markers = "python_version >= \"3.10\"" +files = [ + {file = "pytest-8.4.1-py3-none-any.whl", hash = "sha256:539c70ba6fcead8e78eebbf1115e8b589e7565830d7d006a8723f19ac8a0afb7"}, + {file = "pytest-8.4.1.tar.gz", hash = "sha256:7c67fd69174877359ed9371ec3af8a3d2b04741818c51e5e99cc1742251fa93c"}, +] + +[package.dependencies] +colorama = {version = ">=0.4", markers = "sys_platform == \"win32\""} +exceptiongroup = {version = ">=1", markers = "python_version < \"3.11\""} +iniconfig = ">=1" +packaging = ">=20" +pluggy = ">=1.5,<2" +pygments = ">=2.7.2" +tomli = {version = ">=1", markers = "python_version < \"3.11\""} + +[package.extras] +dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "requests", "setuptools", "xmlschema"] + +[[package]] +name = "pytest-cov" +version = "5.0.0" +description = "Pytest plugin for measuring coverage." 
+optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "pytest-cov-5.0.0.tar.gz", hash = "sha256:5837b58e9f6ebd335b0f8060eecce69b662415b16dc503883a02f45dfeb14857"}, + {file = "pytest_cov-5.0.0-py3-none-any.whl", hash = "sha256:4f0764a1219df53214206bf1feea4633c3b558a2925c8b59f144f682861ce652"}, +] + +[package.dependencies] +coverage = {version = ">=5.2.1", extras = ["toml"]} +pytest = ">=4.6" + +[package.extras] +testing = ["fields", "hunter", "process-tests", "pytest-xdist", "virtualenv"] + +[[package]] +name = "pytest-mock" +version = "3.14.1" +description = "Thin-wrapper around the mock package for easier use with pytest" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "pytest_mock-3.14.1-py3-none-any.whl", hash = "sha256:178aefcd11307d874b4cd3100344e7e2d888d9791a6a1d9bfe90fbc1b74fd1d0"}, + {file = "pytest_mock-3.14.1.tar.gz", hash = "sha256:159e9edac4c451ce77a5cdb9fc5d1100708d2dd4ba3c3df572f14097351af80e"}, +] + +[package.dependencies] +pytest = ">=6.2.5" + +[package.extras] +dev = ["pre-commit", "pytest-asyncio", "tox"] + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +description = "Extensions to the standard Python datetime module" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +groups = ["main"] +files = [ + {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, + {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, +] + +[package.dependencies] +six = ">=1.5" + +[[package]] +name = "python-dotenv" +version = "1.0.1" +description = "Read key-value pairs from a .env file and set them as environment variables" +optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "python_version < \"3.10\"" +files = [ + {file = "python-dotenv-1.0.1.tar.gz", hash = "sha256:e324ee90a023d808f1959c46bcbc04446a10ced277783dc6ee09987c37ec10ca"}, + {file = "python_dotenv-1.0.1-py3-none-any.whl", hash = "sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a"}, +] + +[package.extras] +cli = ["click (>=5.0)"] + +[[package]] +name = "python-dotenv" +version = "1.1.1" +description = "Read key-value pairs from a .env file and set them as environment variables" +optional = false +python-versions = ">=3.9" +groups = ["main"] +markers = "python_version >= \"3.10\"" +files = [ + {file = "python_dotenv-1.1.1-py3-none-any.whl", hash = "sha256:31f23644fe2602f88ff55e1f5c79ba497e01224ee7737937930c448e4d0e24dc"}, + {file = "python_dotenv-1.1.1.tar.gz", hash = "sha256:a8a6399716257f45be6a007360200409fce5cda2661e3dec71d23dc15f6189ab"}, +] + +[package.extras] +cli = ["click (>=5.0)"] + +[[package]] +name = "pytz" +version = "2025.2" +description = "World timezone definitions, modern and historical" +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "pytz-2025.2-py2.py3-none-any.whl", hash = "sha256:5ddf76296dd8c44c26eb8f4b6f35488f3ccbf6fbbd7adee0b7262d43f0ec2f00"}, + {file = "pytz-2025.2.tar.gz", hash = "sha256:360b9e3dbb49a209c21ad61809c7fb453643e048b38924c765813546746e81c3"}, +] + +[[package]] +name = "pyyaml" +version = "6.0.2" +description = "YAML parser and emitter for Python" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, + 
{file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"}, + {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"}, + {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"}, + {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"}, + {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", 
hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"}, + {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"}, + {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"}, + {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"}, + {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"}, + {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"}, + {file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"}, + {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"}, + {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"}, + {file = 
"PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"}, + {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"}, + {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"}, + {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"}, +] + +[[package]] +name = "redis" +version = "5.0.1" +description = "Python client for Redis database and key-value store" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "redis-5.0.1-py3-none-any.whl", hash = "sha256:ed4802971884ae19d640775ba3b03aa2e7bd5e8fb8dfaed2decce4d0fc48391f"}, + {file = "redis-5.0.1.tar.gz", hash = "sha256:0dab495cd5753069d3bc650a0dde8a8f9edde16fc5691b689a566eda58100d0f"}, +] + +[package.dependencies] +async-timeout = {version = ">=4.0.2", markers = "python_full_version <= \"3.11.2\""} + +[package.extras] +hiredis = ["hiredis (>=1.0.0)"] +ocsp = ["cryptography (>=36.0.1)", "pyopenssl (==20.0.1)", "requests (>=2.26.0)"] + +[[package]] +name = "regex" +version = "2024.11.6" +description = "Alternative regular expression module, to replace re." +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "regex-2024.11.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ff590880083d60acc0433f9c3f713c51f7ac6ebb9adf889c79a261ecf541aa91"}, + {file = "regex-2024.11.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:658f90550f38270639e83ce492f27d2c8d2cd63805c65a13a14d36ca126753f0"}, + {file = "regex-2024.11.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:164d8b7b3b4bcb2068b97428060b2a53be050085ef94eca7f240e7947f1b080e"}, + {file = "regex-2024.11.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d3660c82f209655a06b587d55e723f0b813d3a7db2e32e5e7dc64ac2a9e86fde"}, + {file = "regex-2024.11.6-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d22326fcdef5e08c154280b71163ced384b428343ae16a5ab2b3354aed12436e"}, + {file = "regex-2024.11.6-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f1ac758ef6aebfc8943560194e9fd0fa18bcb34d89fd8bd2af18183afd8da3a2"}, + {file = "regex-2024.11.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:997d6a487ff00807ba810e0f8332c18b4eb8d29463cfb7c820dc4b6e7562d0cf"}, + {file = "regex-2024.11.6-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:02a02d2bb04fec86ad61f3ea7f49c015a0681bf76abb9857f945d26159d2968c"}, + {file = "regex-2024.11.6-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f02f93b92358ee3f78660e43b4b0091229260c5d5c408d17d60bf26b6c900e86"}, + {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:06eb1be98df10e81ebaded73fcd51989dcf534e3c753466e4b60c4697a003b67"}, + {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:040df6fe1a5504eb0f04f048e6d09cd7c7110fef851d7c567a6b6e09942feb7d"}, + {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:fdabbfc59f2c6edba2a6622c647b716e34e8e3867e0ab975412c5c2f79b82da2"}, + {file = 
"regex-2024.11.6-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:8447d2d39b5abe381419319f942de20b7ecd60ce86f16a23b0698f22e1b70008"}, + {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:da8f5fc57d1933de22a9e23eec290a0d8a5927a5370d24bda9a6abe50683fe62"}, + {file = "regex-2024.11.6-cp310-cp310-win32.whl", hash = "sha256:b489578720afb782f6ccf2840920f3a32e31ba28a4b162e13900c3e6bd3f930e"}, + {file = "regex-2024.11.6-cp310-cp310-win_amd64.whl", hash = "sha256:5071b2093e793357c9d8b2929dfc13ac5f0a6c650559503bb81189d0a3814519"}, + {file = "regex-2024.11.6-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:5478c6962ad548b54a591778e93cd7c456a7a29f8eca9c49e4f9a806dcc5d638"}, + {file = "regex-2024.11.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2c89a8cc122b25ce6945f0423dc1352cb9593c68abd19223eebbd4e56612c5b7"}, + {file = "regex-2024.11.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:94d87b689cdd831934fa3ce16cc15cd65748e6d689f5d2b8f4f4df2065c9fa20"}, + {file = "regex-2024.11.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1062b39a0a2b75a9c694f7a08e7183a80c63c0d62b301418ffd9c35f55aaa114"}, + {file = "regex-2024.11.6-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:167ed4852351d8a750da48712c3930b031f6efdaa0f22fa1933716bfcd6bf4a3"}, + {file = "regex-2024.11.6-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d548dafee61f06ebdb584080621f3e0c23fff312f0de1afc776e2a2ba99a74f"}, + {file = "regex-2024.11.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2a19f302cd1ce5dd01a9099aaa19cae6173306d1302a43b627f62e21cf18ac0"}, + {file = "regex-2024.11.6-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bec9931dfb61ddd8ef2ebc05646293812cb6b16b60cf7c9511a832b6f1854b55"}, + {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:9714398225f299aa85267fd222f7142fcb5c769e73d7733344efc46f2ef5cf89"}, + {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:202eb32e89f60fc147a41e55cb086db2a3f8cb82f9a9a88440dcfc5d37faae8d"}, + {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:4181b814e56078e9b00427ca358ec44333765f5ca1b45597ec7446d3a1ef6e34"}, + {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:068376da5a7e4da51968ce4c122a7cd31afaaec4fccc7856c92f63876e57b51d"}, + {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ac10f2c4184420d881a3475fb2c6f4d95d53a8d50209a2500723d831036f7c45"}, + {file = "regex-2024.11.6-cp311-cp311-win32.whl", hash = "sha256:c36f9b6f5f8649bb251a5f3f66564438977b7ef8386a52460ae77e6070d309d9"}, + {file = "regex-2024.11.6-cp311-cp311-win_amd64.whl", hash = "sha256:02e28184be537f0e75c1f9b2f8847dc51e08e6e171c6bde130b2687e0c33cf60"}, + {file = "regex-2024.11.6-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:52fb28f528778f184f870b7cf8f225f5eef0a8f6e3778529bdd40c7b3920796a"}, + {file = "regex-2024.11.6-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:fdd6028445d2460f33136c55eeb1f601ab06d74cb3347132e1c24250187500d9"}, + {file = "regex-2024.11.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:805e6b60c54bf766b251e94526ebad60b7de0c70f70a4e6210ee2891acb70bf2"}, + {file = "regex-2024.11.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b85c2530be953a890eaffde05485238f07029600e8f098cdf1848d414a8b45e4"}, + {file = 
"regex-2024.11.6-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bb26437975da7dc36b7efad18aa9dd4ea569d2357ae6b783bf1118dabd9ea577"}, + {file = "regex-2024.11.6-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:abfa5080c374a76a251ba60683242bc17eeb2c9818d0d30117b4486be10c59d3"}, + {file = "regex-2024.11.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b7fa6606c2881c1db9479b0eaa11ed5dfa11c8d60a474ff0e095099f39d98e"}, + {file = "regex-2024.11.6-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0c32f75920cf99fe6b6c539c399a4a128452eaf1af27f39bce8909c9a3fd8cbe"}, + {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:982e6d21414e78e1f51cf595d7f321dcd14de1f2881c5dc6a6e23bbbbd68435e"}, + {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a7c2155f790e2fb448faed6dd241386719802296ec588a8b9051c1f5c481bc29"}, + {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:149f5008d286636e48cd0b1dd65018548944e495b0265b45e1bffecce1ef7f39"}, + {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:e5364a4502efca094731680e80009632ad6624084aff9a23ce8c8c6820de3e51"}, + {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:0a86e7eeca091c09e021db8eb72d54751e527fa47b8d5787caf96d9831bd02ad"}, + {file = "regex-2024.11.6-cp312-cp312-win32.whl", hash = "sha256:32f9a4c643baad4efa81d549c2aadefaeba12249b2adc5af541759237eee1c54"}, + {file = "regex-2024.11.6-cp312-cp312-win_amd64.whl", hash = "sha256:a93c194e2df18f7d264092dc8539b8ffb86b45b899ab976aa15d48214138e81b"}, + {file = "regex-2024.11.6-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a6ba92c0bcdf96cbf43a12c717eae4bc98325ca3730f6b130ffa2e3c3c723d84"}, + {file = "regex-2024.11.6-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:525eab0b789891ac3be914d36893bdf972d483fe66551f79d3e27146191a37d4"}, + {file = "regex-2024.11.6-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:086a27a0b4ca227941700e0b31425e7a28ef1ae8e5e05a33826e17e47fbfdba0"}, + {file = "regex-2024.11.6-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bde01f35767c4a7899b7eb6e823b125a64de314a8ee9791367c9a34d56af18d0"}, + {file = "regex-2024.11.6-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b583904576650166b3d920d2bcce13971f6f9e9a396c673187f49811b2769dc7"}, + {file = "regex-2024.11.6-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c4de13f06a0d54fa0d5ab1b7138bfa0d883220965a29616e3ea61b35d5f5fc7"}, + {file = "regex-2024.11.6-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3cde6e9f2580eb1665965ce9bf17ff4952f34f5b126beb509fee8f4e994f143c"}, + {file = "regex-2024.11.6-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0d7f453dca13f40a02b79636a339c5b62b670141e63efd511d3f8f73fba162b3"}, + {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:59dfe1ed21aea057a65c6b586afd2a945de04fc7db3de0a6e3ed5397ad491b07"}, + {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b97c1e0bd37c5cd7902e65f410779d39eeda155800b65fc4d04cc432efa9bc6e"}, + {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:f9d1e379028e0fc2ae3654bac3cbbef81bf3fd571272a42d56c24007979bafb6"}, + {file = 
"regex-2024.11.6-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:13291b39131e2d002a7940fb176e120bec5145f3aeb7621be6534e46251912c4"}, + {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4f51f88c126370dcec4908576c5a627220da6c09d0bff31cfa89f2523843316d"}, + {file = "regex-2024.11.6-cp313-cp313-win32.whl", hash = "sha256:63b13cfd72e9601125027202cad74995ab26921d8cd935c25f09c630436348ff"}, + {file = "regex-2024.11.6-cp313-cp313-win_amd64.whl", hash = "sha256:2b3361af3198667e99927da8b84c1b010752fa4b1115ee30beaa332cabc3ef1a"}, + {file = "regex-2024.11.6-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:3a51ccc315653ba012774efca4f23d1d2a8a8f278a6072e29c7147eee7da446b"}, + {file = "regex-2024.11.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ad182d02e40de7459b73155deb8996bbd8e96852267879396fb274e8700190e3"}, + {file = "regex-2024.11.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ba9b72e5643641b7d41fa1f6d5abda2c9a263ae835b917348fc3c928182ad467"}, + {file = "regex-2024.11.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40291b1b89ca6ad8d3f2b82782cc33807f1406cf68c8d440861da6304d8ffbbd"}, + {file = "regex-2024.11.6-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cdf58d0e516ee426a48f7b2c03a332a4114420716d55769ff7108c37a09951bf"}, + {file = "regex-2024.11.6-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a36fdf2af13c2b14738f6e973aba563623cb77d753bbbd8d414d18bfaa3105dd"}, + {file = "regex-2024.11.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d1cee317bfc014c2419a76bcc87f071405e3966da434e03e13beb45f8aced1a6"}, + {file = "regex-2024.11.6-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:50153825ee016b91549962f970d6a4442fa106832e14c918acd1c8e479916c4f"}, + {file = "regex-2024.11.6-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ea1bfda2f7162605f6e8178223576856b3d791109f15ea99a9f95c16a7636fb5"}, + {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:df951c5f4a1b1910f1a99ff42c473ff60f8225baa1cdd3539fe2819d9543e9df"}, + {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:072623554418a9911446278f16ecb398fb3b540147a7828c06e2011fa531e773"}, + {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:f654882311409afb1d780b940234208a252322c24a93b442ca714d119e68086c"}, + {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:89d75e7293d2b3e674db7d4d9b1bee7f8f3d1609428e293771d1a962617150cc"}, + {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:f65557897fc977a44ab205ea871b690adaef6b9da6afda4790a2484b04293a5f"}, + {file = "regex-2024.11.6-cp38-cp38-win32.whl", hash = "sha256:6f44ec28b1f858c98d3036ad5d7d0bfc568bdd7a74f9c24e25f41ef1ebfd81a4"}, + {file = "regex-2024.11.6-cp38-cp38-win_amd64.whl", hash = "sha256:bb8f74f2f10dbf13a0be8de623ba4f9491faf58c24064f32b65679b021ed0001"}, + {file = "regex-2024.11.6-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5704e174f8ccab2026bd2f1ab6c510345ae8eac818b613d7d73e785f1310f839"}, + {file = "regex-2024.11.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:220902c3c5cc6af55d4fe19ead504de80eb91f786dc102fbd74894b1551f095e"}, + {file = "regex-2024.11.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5e7e351589da0850c125f1600a4c4ba3c722efefe16b297de54300f08d734fbf"}, + {file = 
"regex-2024.11.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5056b185ca113c88e18223183aa1a50e66507769c9640a6ff75859619d73957b"}, + {file = "regex-2024.11.6-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2e34b51b650b23ed3354b5a07aab37034d9f923db2a40519139af34f485f77d0"}, + {file = "regex-2024.11.6-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5670bce7b200273eee1840ef307bfa07cda90b38ae56e9a6ebcc9f50da9c469b"}, + {file = "regex-2024.11.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:08986dce1339bc932923e7d1232ce9881499a0e02925f7402fb7c982515419ef"}, + {file = "regex-2024.11.6-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:93c0b12d3d3bc25af4ebbf38f9ee780a487e8bf6954c115b9f015822d3bb8e48"}, + {file = "regex-2024.11.6-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:764e71f22ab3b305e7f4c21f1a97e1526a25ebdd22513e251cf376760213da13"}, + {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:f056bf21105c2515c32372bbc057f43eb02aae2fda61052e2f7622c801f0b4e2"}, + {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:69ab78f848845569401469da20df3e081e6b5a11cb086de3eed1d48f5ed57c95"}, + {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:86fddba590aad9208e2fa8b43b4c098bb0ec74f15718bb6a704e3c63e2cef3e9"}, + {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:684d7a212682996d21ca12ef3c17353c021fe9de6049e19ac8481ec35574a70f"}, + {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:a03e02f48cd1abbd9f3b7e3586d97c8f7a9721c436f51a5245b3b9483044480b"}, + {file = "regex-2024.11.6-cp39-cp39-win32.whl", hash = "sha256:41758407fc32d5c3c5de163888068cfee69cb4c2be844e7ac517a52770f9af57"}, + {file = "regex-2024.11.6-cp39-cp39-win_amd64.whl", hash = "sha256:b2837718570f95dd41675328e111345f9b7095d821bac435aac173ac80b19983"}, + {file = "regex-2024.11.6.tar.gz", hash = "sha256:7ab159b063c52a0333c884e4679f8d7a85112ee3078fe3d9004b2dd875585519"}, +] + +[[package]] +name = "requests" +version = "2.31.0" +description = "Python HTTP for Humans." +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, + {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, +] + +[package.dependencies] +certifi = ">=2017.4.17" +charset-normalizer = ">=2,<4" +idna = ">=2.5,<4" +urllib3 = ">=1.21.1,<3" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] + +[[package]] +name = "requests-oauthlib" +version = "2.0.0" +description = "OAuthlib authentication support for Requests." 
+optional = false +python-versions = ">=3.4" +groups = ["main"] +files = [ + {file = "requests-oauthlib-2.0.0.tar.gz", hash = "sha256:b3dffaebd884d8cd778494369603a9e7b58d29111bf6b41bdc2dcd87203af4e9"}, + {file = "requests_oauthlib-2.0.0-py2.py3-none-any.whl", hash = "sha256:7dd8a5c40426b779b0868c404bdef9768deccf22749cde15852df527e6269b36"}, +] + +[package.dependencies] +oauthlib = ">=3.0.0" +requests = ">=2.0.0" + +[package.extras] +rsa = ["oauthlib[signedtoken] (>=3.0.0)"] + +[[package]] +name = "rich" +version = "14.0.0" +description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" +optional = false +python-versions = ">=3.8.0" +groups = ["main"] +files = [ + {file = "rich-14.0.0-py3-none-any.whl", hash = "sha256:1c9491e1951aac09caffd42f448ee3d04e58923ffe14993f6e83068dc395d7e0"}, + {file = "rich-14.0.0.tar.gz", hash = "sha256:82f1bc23a6a21ebca4ae0c45af9bdbc492ed20231dcb63f297d6d1021a9d5725"}, +] + +[package.dependencies] +markdown-it-py = ">=2.2.0" +pygments = ">=2.13.0,<3.0.0" +typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.11\""} + +[package.extras] +jupyter = ["ipywidgets (>=7.5.1,<9)"] + +[[package]] +name = "rsa" +version = "4.9.1" +description = "Pure-Python RSA implementation" +optional = false +python-versions = "<4,>=3.6" +groups = ["main"] +files = [ + {file = "rsa-4.9.1-py3-none-any.whl", hash = "sha256:68635866661c6836b8d39430f97a996acbd61bfa49406748ea243539fe239762"}, + {file = "rsa-4.9.1.tar.gz", hash = "sha256:e7bdbfdb5497da4c07dfd35530e1a902659db6ff241e39d9953cad06ebd0ae75"}, +] + +[package.dependencies] +pyasn1 = ">=0.1.3" + +[[package]] +name = "safetensors" +version = "0.5.3" +description = "" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "safetensors-0.5.3-cp38-abi3-macosx_10_12_x86_64.whl", hash = "sha256:bd20eb133db8ed15b40110b7c00c6df51655a2998132193de2f75f72d99c7073"}, + {file = "safetensors-0.5.3-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:21d01c14ff6c415c485616b8b0bf961c46b3b343ca59110d38d744e577f9cce7"}, + {file = "safetensors-0.5.3-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:11bce6164887cd491ca75c2326a113ba934be596e22b28b1742ce27b1d076467"}, + {file = "safetensors-0.5.3-cp38-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4a243be3590bc3301c821da7a18d87224ef35cbd3e5f5727e4e0728b8172411e"}, + {file = "safetensors-0.5.3-cp38-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8bd84b12b1670a6f8e50f01e28156422a2bc07fb16fc4e98bded13039d688a0d"}, + {file = "safetensors-0.5.3-cp38-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:391ac8cab7c829452175f871fcaf414aa1e292b5448bd02620f675a7f3e7abb9"}, + {file = "safetensors-0.5.3-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cead1fa41fc54b1e61089fa57452e8834f798cb1dc7a09ba3524f1eb08e0317a"}, + {file = "safetensors-0.5.3-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1077f3e94182d72618357b04b5ced540ceb71c8a813d3319f1aba448e68a770d"}, + {file = "safetensors-0.5.3-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:799021e78287bac619c7b3f3606730a22da4cda27759ddf55d37c8db7511c74b"}, + {file = "safetensors-0.5.3-cp38-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:df26da01aaac504334644e1b7642fa000bfec820e7cef83aeac4e355e03195ff"}, + {file = "safetensors-0.5.3-cp38-abi3-musllinux_1_2_i686.whl", hash = 
"sha256:32c3ef2d7af8b9f52ff685ed0bc43913cdcde135089ae322ee576de93eae5135"}, + {file = "safetensors-0.5.3-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:37f1521be045e56fc2b54c606d4455573e717b2d887c579ee1dbba5f868ece04"}, + {file = "safetensors-0.5.3-cp38-abi3-win32.whl", hash = "sha256:cfc0ec0846dcf6763b0ed3d1846ff36008c6e7290683b61616c4b040f6a54ace"}, + {file = "safetensors-0.5.3-cp38-abi3-win_amd64.whl", hash = "sha256:836cbbc320b47e80acd40e44c8682db0e8ad7123209f69b093def21ec7cafd11"}, + {file = "safetensors-0.5.3.tar.gz", hash = "sha256:b6b0d6ecacec39a4fdd99cc19f4576f5219ce858e6fd8dbe7609df0b8dc56965"}, +] + +[package.extras] +all = ["safetensors[jax]", "safetensors[numpy]", "safetensors[paddlepaddle]", "safetensors[pinned-tf]", "safetensors[quality]", "safetensors[testing]", "safetensors[torch]"] +dev = ["safetensors[all]"] +jax = ["flax (>=0.6.3)", "jax (>=0.3.25)", "jaxlib (>=0.3.25)", "safetensors[numpy]"] +mlx = ["mlx (>=0.0.9)"] +numpy = ["numpy (>=1.21.6)"] +paddlepaddle = ["paddlepaddle (>=2.4.1)", "safetensors[numpy]"] +pinned-tf = ["safetensors[numpy]", "tensorflow (==2.18.0)"] +quality = ["black (==22.3)", "click (==8.0.4)", "flake8 (>=3.8.3)", "isort (>=5.5.4)"] +tensorflow = ["safetensors[numpy]", "tensorflow (>=2.11.0)"] +testing = ["h5py (>=3.7.0)", "huggingface-hub (>=0.12.1)", "hypothesis (>=6.70.2)", "pytest (>=7.2.0)", "pytest-benchmark (>=4.0.0)", "safetensors[numpy]", "setuptools-rust (>=1.5.2)"] +torch = ["safetensors[numpy]", "torch (>=1.10)"] + +[[package]] +name = "scipy" +version = "1.10.1" +description = "Fundamental algorithms for scientific computing in Python" +optional = false +python-versions = "<3.12,>=3.8" +groups = ["main"] +markers = "python_version < \"3.10\"" +files = [ + {file = "scipy-1.10.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e7354fd7527a4b0377ce55f286805b34e8c54b91be865bac273f527e1b839019"}, + {file = "scipy-1.10.1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:4b3f429188c66603a1a5c549fb414e4d3bdc2a24792e061ffbd607d3d75fd84e"}, + {file = "scipy-1.10.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1553b5dcddd64ba9a0d95355e63fe6c3fc303a8fd77c7bc91e77d61363f7433f"}, + {file = "scipy-1.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c0ff64b06b10e35215abce517252b375e580a6125fd5fdf6421b98efbefb2d2"}, + {file = "scipy-1.10.1-cp310-cp310-win_amd64.whl", hash = "sha256:fae8a7b898c42dffe3f7361c40d5952b6bf32d10c4569098d276b4c547905ee1"}, + {file = "scipy-1.10.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0f1564ea217e82c1bbe75ddf7285ba0709ecd503f048cb1236ae9995f64217bd"}, + {file = "scipy-1.10.1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:d925fa1c81b772882aa55bcc10bf88324dadb66ff85d548c71515f6689c6dac5"}, + {file = "scipy-1.10.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aaea0a6be54462ec027de54fca511540980d1e9eea68b2d5c1dbfe084797be35"}, + {file = "scipy-1.10.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15a35c4242ec5f292c3dd364a7c71a61be87a3d4ddcc693372813c0b73c9af1d"}, + {file = "scipy-1.10.1-cp311-cp311-win_amd64.whl", hash = "sha256:43b8e0bcb877faf0abfb613d51026cd5cc78918e9530e375727bf0625c82788f"}, + {file = "scipy-1.10.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5678f88c68ea866ed9ebe3a989091088553ba12c6090244fdae3e467b1139c35"}, + {file = "scipy-1.10.1-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:39becb03541f9e58243f4197584286e339029e8908c46f7221abeea4b749fa88"}, + {file 
= "scipy-1.10.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bce5869c8d68cf383ce240e44c1d9ae7c06078a9396df68ce88a1230f93a30c1"}, + {file = "scipy-1.10.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:07c3457ce0b3ad5124f98a86533106b643dd811dd61b548e78cf4c8786652f6f"}, + {file = "scipy-1.10.1-cp38-cp38-win_amd64.whl", hash = "sha256:049a8bbf0ad95277ffba9b3b7d23e5369cc39e66406d60422c8cfef40ccc8415"}, + {file = "scipy-1.10.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:cd9f1027ff30d90618914a64ca9b1a77a431159df0e2a195d8a9e8a04c78abf9"}, + {file = "scipy-1.10.1-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:79c8e5a6c6ffaf3a2262ef1be1e108a035cf4f05c14df56057b64acc5bebffb6"}, + {file = "scipy-1.10.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:51af417a000d2dbe1ec6c372dfe688e041a7084da4fdd350aeb139bd3fb55353"}, + {file = "scipy-1.10.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1b4735d6c28aad3cdcf52117e0e91d6b39acd4272f3f5cd9907c24ee931ad601"}, + {file = "scipy-1.10.1-cp39-cp39-win_amd64.whl", hash = "sha256:7ff7f37b1bf4417baca958d254e8e2875d0cc23aaadbe65b3d5b3077b0eb23ea"}, + {file = "scipy-1.10.1.tar.gz", hash = "sha256:2cf9dfb80a7b4589ba4c40ce7588986d6d5cebc5457cad2c2880f6bc2d42f3a5"}, +] + +[package.dependencies] +numpy = ">=1.19.5,<1.27.0" + +[package.extras] +dev = ["click", "doit (>=0.36.0)", "flake8", "mypy", "pycodestyle", "pydevtool", "rich-click", "typing_extensions"] +doc = ["matplotlib (>2)", "numpydoc", "pydata-sphinx-theme (==0.9.0)", "sphinx (!=4.1.0)", "sphinx-design (>=0.2.0)"] +test = ["asv", "gmpy2", "mpmath", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] + +[[package]] +name = "scipy" +version = "1.15.3" +description = "Fundamental algorithms for scientific computing in Python" +optional = false +python-versions = ">=3.10" +groups = ["main"] +markers = "python_version >= \"3.10\"" +files = [ + {file = "scipy-1.15.3-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:a345928c86d535060c9c2b25e71e87c39ab2f22fc96e9636bd74d1dbf9de448c"}, + {file = "scipy-1.15.3-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:ad3432cb0f9ed87477a8d97f03b763fd1d57709f1bbde3c9369b1dff5503b253"}, + {file = "scipy-1.15.3-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:aef683a9ae6eb00728a542b796f52a5477b78252edede72b8327a886ab63293f"}, + {file = "scipy-1.15.3-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:1c832e1bd78dea67d5c16f786681b28dd695a8cb1fb90af2e27580d3d0967e92"}, + {file = "scipy-1.15.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:263961f658ce2165bbd7b99fa5135195c3a12d9bef045345016b8b50c315cb82"}, + {file = "scipy-1.15.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e2abc762b0811e09a0d3258abee2d98e0c703eee49464ce0069590846f31d40"}, + {file = "scipy-1.15.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:ed7284b21a7a0c8f1b6e5977ac05396c0d008b89e05498c8b7e8f4a1423bba0e"}, + {file = "scipy-1.15.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:5380741e53df2c566f4d234b100a484b420af85deb39ea35a1cc1be84ff53a5c"}, + {file = "scipy-1.15.3-cp310-cp310-win_amd64.whl", hash = "sha256:9d61e97b186a57350f6d6fd72640f9e99d5a4a2b8fbf4b9ee9a841eab327dc13"}, + {file = "scipy-1.15.3-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:993439ce220d25e3696d1b23b233dd010169b62f6456488567e830654ee37a6b"}, + {file = 
"scipy-1.15.3-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:34716e281f181a02341ddeaad584205bd2fd3c242063bd3423d61ac259ca7eba"}, + {file = "scipy-1.15.3-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:3b0334816afb8b91dab859281b1b9786934392aa3d527cd847e41bb6f45bee65"}, + {file = "scipy-1.15.3-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:6db907c7368e3092e24919b5e31c76998b0ce1684d51a90943cb0ed1b4ffd6c1"}, + {file = "scipy-1.15.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:721d6b4ef5dc82ca8968c25b111e307083d7ca9091bc38163fb89243e85e3889"}, + {file = "scipy-1.15.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:39cb9c62e471b1bb3750066ecc3a3f3052b37751c7c3dfd0fd7e48900ed52982"}, + {file = "scipy-1.15.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:795c46999bae845966368a3c013e0e00947932d68e235702b5c3f6ea799aa8c9"}, + {file = "scipy-1.15.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:18aaacb735ab38b38db42cb01f6b92a2d0d4b6aabefeb07f02849e47f8fb3594"}, + {file = "scipy-1.15.3-cp311-cp311-win_amd64.whl", hash = "sha256:ae48a786a28412d744c62fd7816a4118ef97e5be0bee968ce8f0a2fba7acf3bb"}, + {file = "scipy-1.15.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:6ac6310fdbfb7aa6612408bd2f07295bcbd3fda00d2d702178434751fe48e019"}, + {file = "scipy-1.15.3-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:185cd3d6d05ca4b44a8f1595af87f9c372bb6acf9c808e99aa3e9aa03bd98cf6"}, + {file = "scipy-1.15.3-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:05dc6abcd105e1a29f95eada46d4a3f251743cfd7d3ae8ddb4088047f24ea477"}, + {file = "scipy-1.15.3-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:06efcba926324df1696931a57a176c80848ccd67ce6ad020c810736bfd58eb1c"}, + {file = "scipy-1.15.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c05045d8b9bfd807ee1b9f38761993297b10b245f012b11b13b91ba8945f7e45"}, + {file = "scipy-1.15.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:271e3713e645149ea5ea3e97b57fdab61ce61333f97cfae392c28ba786f9bb49"}, + {file = "scipy-1.15.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6cfd56fc1a8e53f6e89ba3a7a7251f7396412d655bca2aa5611c8ec9a6784a1e"}, + {file = "scipy-1.15.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:0ff17c0bb1cb32952c09217d8d1eed9b53d1463e5f1dd6052c7857f83127d539"}, + {file = "scipy-1.15.3-cp312-cp312-win_amd64.whl", hash = "sha256:52092bc0472cfd17df49ff17e70624345efece4e1a12b23783a1ac59a1b728ed"}, + {file = "scipy-1.15.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:2c620736bcc334782e24d173c0fdbb7590a0a436d2fdf39310a8902505008759"}, + {file = "scipy-1.15.3-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:7e11270a000969409d37ed399585ee530b9ef6aa99d50c019de4cb01e8e54e62"}, + {file = "scipy-1.15.3-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:8c9ed3ba2c8a2ce098163a9bdb26f891746d02136995df25227a20e71c396ebb"}, + {file = "scipy-1.15.3-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:0bdd905264c0c9cfa74a4772cdb2070171790381a5c4d312c973382fc6eaf730"}, + {file = "scipy-1.15.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79167bba085c31f38603e11a267d862957cbb3ce018d8b38f79ac043bc92d825"}, + {file = "scipy-1.15.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c9deabd6d547aee2c9a81dee6cc96c6d7e9a9b1953f74850c179f91fdc729cb7"}, + {file = "scipy-1.15.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:dde4fc32993071ac0c7dd2d82569e544f0bdaff66269cb475e0f369adad13f11"}, + {file = "scipy-1.15.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f77f853d584e72e874d87357ad70f44b437331507d1c311457bed8ed2b956126"}, + {file = "scipy-1.15.3-cp313-cp313-win_amd64.whl", hash = "sha256:b90ab29d0c37ec9bf55424c064312930ca5f4bde15ee8619ee44e69319aab163"}, + {file = "scipy-1.15.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:3ac07623267feb3ae308487c260ac684b32ea35fd81e12845039952f558047b8"}, + {file = "scipy-1.15.3-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:6487aa99c2a3d509a5227d9a5e889ff05830a06b2ce08ec30df6d79db5fcd5c5"}, + {file = "scipy-1.15.3-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:50f9e62461c95d933d5c5ef4a1f2ebf9a2b4e83b0db374cb3f1de104d935922e"}, + {file = "scipy-1.15.3-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:14ed70039d182f411ffc74789a16df3835e05dc469b898233a245cdfd7f162cb"}, + {file = "scipy-1.15.3-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a769105537aa07a69468a0eefcd121be52006db61cdd8cac8a0e68980bbb723"}, + {file = "scipy-1.15.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9db984639887e3dffb3928d118145ffe40eff2fa40cb241a306ec57c219ebbbb"}, + {file = "scipy-1.15.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:40e54d5c7e7ebf1aa596c374c49fa3135f04648a0caabcb66c52884b943f02b4"}, + {file = "scipy-1.15.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:5e721fed53187e71d0ccf382b6bf977644c533e506c4d33c3fb24de89f5c3ed5"}, + {file = "scipy-1.15.3-cp313-cp313t-win_amd64.whl", hash = "sha256:76ad1fb5f8752eabf0fa02e4cc0336b4e8f021e2d5f061ed37d6d264db35e3ca"}, + {file = "scipy-1.15.3.tar.gz", hash = "sha256:eae3cf522bc7df64b42cad3925c876e1b0b6c35c1337c93e12c0f366f55b0eaf"}, +] + +[package.dependencies] +numpy = ">=1.23.5,<2.5" + +[package.extras] +dev = ["cython-lint (>=0.12.2)", "doit (>=0.36.0)", "mypy (==1.10.0)", "pycodestyle", "pydevtool", "rich-click", "ruff (>=0.0.292)", "types-psutil", "typing_extensions"] +doc = ["intersphinx_registry", "jupyterlite-pyodide-kernel", "jupyterlite-sphinx (>=0.19.1)", "jupytext", "matplotlib (>=3.5)", "myst-nb", "numpydoc", "pooch", "pydata-sphinx-theme (>=0.15.2)", "sphinx (>=5.0.0,<8.0.0)", "sphinx-copybutton", "sphinx-design (>=0.4.0)"] +test = ["Cython", "array-api-strict (>=2.0,<2.1.1)", "asv", "gmpy2", "hypothesis (>=6.30)", "meson", "mpmath", "ninja ; sys_platform != \"emscripten\"", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] + +[[package]] +name = "setuptools" +version = "75.3.2" +description = "Easily download, build, install, upgrade, and uninstall Python packages" +optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "python_version < \"3.10\"" +files = [ + {file = "setuptools-75.3.2-py3-none-any.whl", hash = "sha256:90ab613b6583fc02d5369cbca13ea26ea0e182d1df2d943ee9cbe81d4c61add9"}, + {file = "setuptools-75.3.2.tar.gz", hash = "sha256:3c1383e1038b68556a382c1e8ded8887cd20141b0eb5708a6c8d277de49364f5"}, +] + +[package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\"", "ruff (>=0.5.2) ; sys_platform != \"cygwin\""] +core = ["importlib-metadata (>=6) ; python_version < \"3.10\"", "importlib-resources (>=5.10.2) ; python_version < \"3.9\"", "jaraco.collections", "jaraco.functools", "jaraco.text (>=3.7)", "more-itertools", "more-itertools (>=8.8)", "packaging", "packaging (>=24)", "platformdirs 
(>=4.2.2)", "tomli (>=2.0.1) ; python_version < \"3.11\"", "wheel (>=0.43.0)"] +cover = ["pytest-cov"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier", "towncrier (<24.7)"] +enabler = ["pytest-enabler (>=2.2)"] +test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21) ; python_version >= \"3.9\" and sys_platform != \"cygwin\"", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "jaraco.test (>=5.5)", "packaging (>=23.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf ; sys_platform != \"cygwin\"", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "ruff (<=0.7.1)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"] +type = ["importlib-metadata (>=7.0.2) ; python_version < \"3.10\"", "jaraco.develop (>=7.21) ; sys_platform != \"cygwin\"", "mypy (==1.12.*)", "pytest-mypy"] + +[[package]] +name = "setuptools" +version = "80.9.0" +description = "Easily download, build, install, upgrade, and uninstall Python packages" +optional = false +python-versions = ">=3.9" +groups = ["main"] +markers = "python_version >= \"3.10\"" +files = [ + {file = "setuptools-80.9.0-py3-none-any.whl", hash = "sha256:062d34222ad13e0cc312a4c02d73f059e86a4acbfbdea8f8f76b28c99f306922"}, + {file = "setuptools-80.9.0.tar.gz", hash = "sha256:f36b47402ecde768dbfafc46e8e4207b4360c654f1f3bb84475f0a28628fb19c"}, +] + +[package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\"", "ruff (>=0.8.0) ; sys_platform != \"cygwin\""] +core = ["importlib_metadata (>=6) ; python_version < \"3.10\"", "jaraco.functools (>=4)", "jaraco.text (>=3.7)", "more_itertools", "more_itertools (>=8.8)", "packaging (>=24.2)", "platformdirs (>=4.2.2)", "tomli (>=2.0.1) ; python_version < \"3.11\"", "wheel (>=0.43.0)"] +cover = ["pytest-cov"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier", "towncrier (<24.7)"] +enabler = ["pytest-enabler (>=2.2)"] +test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21) ; python_version >= \"3.9\" and sys_platform != \"cygwin\"", "jaraco.envs (>=2.2)", "jaraco.path (>=3.7.2)", "jaraco.test (>=5.5)", "packaging (>=24.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf ; sys_platform != \"cygwin\"", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"] +type = ["importlib_metadata (>=7.0.2) ; python_version < \"3.10\"", "jaraco.develop (>=7.21) ; sys_platform != \"cygwin\"", "mypy (==1.14.*)", "pytest-mypy"] + +[[package]] +name = "shellingham" +version = "1.5.4" +description = "Tool to Detect Surrounding Shell" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "shellingham-1.5.4-py2.py3-none-any.whl", hash = "sha256:7ecfff8f2fd72616f7481040475a65b2bf8af90a56c89140852d1120324e8686"}, + {file = 
"shellingham-1.5.4.tar.gz", hash = "sha256:8dbca0739d487e5bd35ab3ca4b36e11c4078f3a234bfce294b0a0291363404de"}, +] + +[[package]] +name = "simplejson" +version = "3.20.1" +description = "Simple, fast, extensible JSON encoder/decoder for Python" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.5" +groups = ["main"] +files = [ + {file = "simplejson-3.20.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:f5272b5866b259fe6c33c4a8c5073bf8b359c3c97b70c298a2f09a69b52c7c41"}, + {file = "simplejson-3.20.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:5c0de368f3052a59a1acf21f8b2dd28686a9e4eba2da7efae7ed9554cb31e7bc"}, + {file = "simplejson-3.20.1-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:0821871404a537fd0e22eba240c74c0467c28af6cc435903eca394cfc74a0497"}, + {file = "simplejson-3.20.1-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:c939a1e576bded47d7d03aa2afc2ae90b928b2cf1d9dc2070ceec51fd463f430"}, + {file = "simplejson-3.20.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:3c4f0a61cdc05550782ca4a2cdb311ea196c2e6be6b24a09bf71360ca8c3ca9b"}, + {file = "simplejson-3.20.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:6c21f5c026ca633cfffcb6bc1fac2e99f65cb2b24657d3bef21aed9916cc3bbf"}, + {file = "simplejson-3.20.1-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:8d23b7f8d6b72319d6d55a0261089ff621ce87e54731c2d3de6a9bf7be5c028c"}, + {file = "simplejson-3.20.1-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:cda5c32a98f392909088111ecec23f2b0d39346ceae1a0fea23ab2d1f84ec21d"}, + {file = "simplejson-3.20.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e580aa65d5f6c3bf41b9b4afe74be5d5ddba9576701c107c772d936ea2b5043a"}, + {file = "simplejson-3.20.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4a586ce4f78cec11f22fe55c5bee0f067e803aab9bad3441afe2181693b5ebb5"}, + {file = "simplejson-3.20.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:74a1608f9e6e8c27a4008d70a54270868306d80ed48c9df7872f9f4b8ac87808"}, + {file = "simplejson-3.20.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03db8cb64154189a92a7786209f24e391644f3a3fa335658be2df2af1960b8d8"}, + {file = "simplejson-3.20.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eea7e2b7d858f6fdfbf0fe3cb846d6bd8a45446865bc09960e51f3d473c2271b"}, + {file = "simplejson-3.20.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e66712b17d8425bb7ff8968d4c7c7fd5a2dd7bd63728b28356223c000dd2f91f"}, + {file = "simplejson-3.20.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a2cc4f6486f9f515b62f5831ff1888886619b84fc837de68f26d919ba7bbdcbc"}, + {file = "simplejson-3.20.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a3c2df555ee4016148fa192e2b9cd9e60bc1d40769366134882685e90aee2a1e"}, + {file = "simplejson-3.20.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:78520f04b7548a5e476b5396c0847e066f1e0a4c0c5e920da1ad65e95f410b11"}, + {file = "simplejson-3.20.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:f4bd49ecde87b0fe9f55cc971449a32832bca9910821f7072bbfae1155eaa007"}, + {file = "simplejson-3.20.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7eaae2b88eb5da53caaffdfa50e2e12022553949b88c0df4f9a9663609373f72"}, + {file = "simplejson-3.20.1-cp310-cp310-win32.whl", hash = "sha256:e836fb88902799eac8debc2b642300748f4860a197fa3d9ea502112b6bb8e142"}, + {file = "simplejson-3.20.1-cp310-cp310-win_amd64.whl", hash = 
"sha256:b122a19b552b212fc3b5b96fc5ce92333d4a9ac0a800803e1f17ebb16dac4be5"}, + {file = "simplejson-3.20.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:325b8c107253d3217e89d7b50c71015b5b31e2433e6c5bf38967b2f80630a8ca"}, + {file = "simplejson-3.20.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:88a7baa8211089b9e58d78fbc1b0b322103f3f3d459ff16f03a36cece0d0fcf0"}, + {file = "simplejson-3.20.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:299b1007b8101d50d95bc0db1bf5c38dc372e85b504cf77f596462083ee77e3f"}, + {file = "simplejson-3.20.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03ec618ed65caab48e81e3ed29586236a8e57daef792f1f3bb59504a7e98cd10"}, + {file = "simplejson-3.20.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cd2cdead1d3197f0ff43373cf4730213420523ba48697743e135e26f3d179f38"}, + {file = "simplejson-3.20.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3466d2839fdc83e1af42e07b90bc8ff361c4e8796cd66722a40ba14e458faddd"}, + {file = "simplejson-3.20.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d492ed8e92f3a9f9be829205f44b1d0a89af6582f0cf43e0d129fa477b93fe0c"}, + {file = "simplejson-3.20.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:f924b485537b640dc69434565463fd6fc0c68c65a8c6e01a823dd26c9983cf79"}, + {file = "simplejson-3.20.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:9e8eacf6a3491bf76ea91a8d46726368a6be0eb94993f60b8583550baae9439e"}, + {file = "simplejson-3.20.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:d34d04bf90b4cea7c22d8b19091633908f14a096caa301b24c2f3d85b5068fb8"}, + {file = "simplejson-3.20.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:69dd28d4ce38390ea4aaf212902712c0fd1093dc4c1ff67e09687c3c3e15a749"}, + {file = "simplejson-3.20.1-cp311-cp311-win32.whl", hash = "sha256:dfe7a9da5fd2a3499436cd350f31539e0a6ded5da6b5b3d422df016444d65e43"}, + {file = "simplejson-3.20.1-cp311-cp311-win_amd64.whl", hash = "sha256:896a6c04d7861d507d800da7642479c3547060bf97419d9ef73d98ced8258766"}, + {file = "simplejson-3.20.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:f31c4a3a7ab18467ee73a27f3e59158255d1520f3aad74315edde7a940f1be23"}, + {file = "simplejson-3.20.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:884e6183d16b725e113b83a6fc0230152ab6627d4d36cb05c89c2c5bccfa7bc6"}, + {file = "simplejson-3.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:03d7a426e416fe0d3337115f04164cd9427eb4256e843a6b8751cacf70abc832"}, + {file = "simplejson-3.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:000602141d0bddfcff60ea6a6e97d5e10c9db6b17fd2d6c66199fa481b6214bb"}, + {file = "simplejson-3.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:af8377a8af78226e82e3a4349efdde59ffa421ae88be67e18cef915e4023a595"}, + {file = "simplejson-3.20.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:15c7de4c88ab2fbcb8781a3b982ef883696736134e20b1210bca43fb42ff1acf"}, + {file = "simplejson-3.20.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:455a882ff3f97d810709f7b620007d4e0aca8da71d06fc5c18ba11daf1c4df49"}, + {file = "simplejson-3.20.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:fc0f523ce923e7f38eb67804bc80e0a028c76d7868500aa3f59225574b5d0453"}, + {file = 
"simplejson-3.20.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:76461ec929282dde4a08061071a47281ad939d0202dc4e63cdd135844e162fbc"}, + {file = "simplejson-3.20.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:ab19c2da8c043607bde4d4ef3a6b633e668a7d2e3d56f40a476a74c5ea71949f"}, + {file = "simplejson-3.20.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b2578bedaedf6294415197b267d4ef678fea336dd78ee2a6d2f4b028e9d07be3"}, + {file = "simplejson-3.20.1-cp312-cp312-win32.whl", hash = "sha256:339f407373325a36b7fd744b688ba5bae0666b5d340ec6d98aebc3014bf3d8ea"}, + {file = "simplejson-3.20.1-cp312-cp312-win_amd64.whl", hash = "sha256:627d4486a1ea7edf1f66bb044ace1ce6b4c1698acd1b05353c97ba4864ea2e17"}, + {file = "simplejson-3.20.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:71e849e7ceb2178344998cbe5ade101f1b329460243c79c27fbfc51c0447a7c3"}, + {file = "simplejson-3.20.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b63fdbab29dc3868d6f009a59797cefaba315fd43cd32ddd998ee1da28e50e29"}, + {file = "simplejson-3.20.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1190f9a3ce644fd50ec277ac4a98c0517f532cfebdcc4bd975c0979a9f05e1fb"}, + {file = "simplejson-3.20.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c1336ba7bcb722ad487cd265701ff0583c0bb6de638364ca947bb84ecc0015d1"}, + {file = "simplejson-3.20.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e975aac6a5acd8b510eba58d5591e10a03e3d16c1cf8a8624ca177491f7230f0"}, + {file = "simplejson-3.20.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6a6dd11ee282937ad749da6f3b8d87952ad585b26e5edfa10da3ae2536c73078"}, + {file = "simplejson-3.20.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ab980fcc446ab87ea0879edad41a5c28f2d86020014eb035cf5161e8de4474c6"}, + {file = "simplejson-3.20.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f5aee2a4cb6b146bd17333ac623610f069f34e8f31d2f4f0c1a2186e50c594f0"}, + {file = "simplejson-3.20.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:652d8eecbb9a3b6461b21ec7cf11fd0acbab144e45e600c817ecf18e4580b99e"}, + {file = "simplejson-3.20.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:8c09948f1a486a89251ee3a67c9f8c969b379f6ffff1a6064b41fea3bce0a112"}, + {file = "simplejson-3.20.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:cbbd7b215ad4fc6f058b5dd4c26ee5c59f72e031dfda3ac183d7968a99e4ca3a"}, + {file = "simplejson-3.20.1-cp313-cp313-win32.whl", hash = "sha256:ae81e482476eaa088ef9d0120ae5345de924f23962c0c1e20abbdff597631f87"}, + {file = "simplejson-3.20.1-cp313-cp313-win_amd64.whl", hash = "sha256:1b9fd15853b90aec3b1739f4471efbf1ac05066a2c7041bf8db821bb73cd2ddc"}, + {file = "simplejson-3.20.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:c7edf279c1376f28bf41e916c015a2a08896597869d57d621f55b6a30c7e1e6d"}, + {file = "simplejson-3.20.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d9202b9de38f12e99a40addd1a8d508a13c77f46d87ab1f9095f154667f4fe81"}, + {file = "simplejson-3.20.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:391345b4157cc4e120027e013bd35c45e2c191e2bf48b8913af488cdc3b9243c"}, + {file = "simplejson-3.20.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c6fdcc9debb711ddd2ad6d69f9386a3d9e8e253234bbb30513e0a7caa9510c51"}, + {file = 
"simplejson-3.20.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9daf8cdc7ee8a9e9f7a3b313ba0a003391857e90d0e82fbcd4d614aa05cb7c3b"}, + {file = "simplejson-3.20.1-cp36-cp36m-musllinux_1_2_aarch64.whl", hash = "sha256:c02f4868a3a46ffe284a51a88d134dc96feff6079a7115164885331a1ba8ed9f"}, + {file = "simplejson-3.20.1-cp36-cp36m-musllinux_1_2_i686.whl", hash = "sha256:3d7310172d5340febd258cb147f46aae30ad57c445f4d7e1ae8461c10aaf43b0"}, + {file = "simplejson-3.20.1-cp36-cp36m-musllinux_1_2_ppc64le.whl", hash = "sha256:4762e05577955312a4c6802f58dd02e040cc79ae59cda510aa1564d84449c102"}, + {file = "simplejson-3.20.1-cp36-cp36m-musllinux_1_2_x86_64.whl", hash = "sha256:8bb98fdf318c05aefd08a92583bd6ee148e93c6756fb1befb7b2d5f27824be78"}, + {file = "simplejson-3.20.1-cp36-cp36m-win32.whl", hash = "sha256:9a74e70818818981294b8e6956ce3496c5e1bd4726ac864fae473197671f7b85"}, + {file = "simplejson-3.20.1-cp36-cp36m-win_amd64.whl", hash = "sha256:e041add470e8f8535cc05509485eb7205729a84441f03b25cde80ad48823792e"}, + {file = "simplejson-3.20.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:7e9d73f46119240e4f4f07868241749d67d09873f40cb968d639aa9ccc488b86"}, + {file = "simplejson-3.20.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ae6e637dc24f8fee332ed23dd070e81394138e42cd4fd9d0923e5045ba122e27"}, + {file = "simplejson-3.20.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:efd3bc6c6b17e3d4620eb6be5196f0d1c08b6ce7c3101fa8e292b79e0908944b"}, + {file = "simplejson-3.20.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:87fc623d457173a0213bc9ca4e346b83c9d443f63ed5cca847fb0cacea3cfc95"}, + {file = "simplejson-3.20.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec6a1e0a7aff76f0e008bebfa950188b9c50b58c1885d898145f48fc8e189a56"}, + {file = "simplejson-3.20.1-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:9c079606f461a6e950099167e21e13985147c8a24be8eea66c9ad68f73fad744"}, + {file = "simplejson-3.20.1-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:9faceb68fba27ef17eda306e4cd97a7b4b14fdadca5fbb15790ba8b26ebeec0c"}, + {file = "simplejson-3.20.1-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:7ceed598e4bacbf5133fe7a418f7991bb2df0683f3ac11fbf9e36a2bc7aa4b85"}, + {file = "simplejson-3.20.1-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:ede69c765e9901861ad7c6139023b7b7d5807c48a2539d817b4ab40018002d5f"}, + {file = "simplejson-3.20.1-cp37-cp37m-win32.whl", hash = "sha256:d8853c269a4c5146ddca4aa7c70e631795e9d11239d5fedb1c6bbc91ffdebcac"}, + {file = "simplejson-3.20.1-cp37-cp37m-win_amd64.whl", hash = "sha256:ed6a17fd397f0e2b3ad668fc9e19253ed2e3875ad9086bd7f795c29a3223f4a1"}, + {file = "simplejson-3.20.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:7551682b60bba3a9e2780742e101cf0a64250e76de7d09b1c4b0c8a7c7cc6834"}, + {file = "simplejson-3.20.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bd9577ec1c8c3a43040e3787711e4c257c70035b7551a21854b5dec88dad09e1"}, + {file = "simplejson-3.20.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4a8e197e4cf6d42c2c57e7c52cd7c1e7b3e37c5911df1314fb393320131e2101"}, + {file = "simplejson-3.20.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6bd09c8c75666e7f62a33d2f1fb57f81da1fcbb19a9fe7d7910b5756e1dd6048"}, + {file = 
"simplejson-3.20.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1bd6bfe5678d73fbd5328eea6a35216503796428fc47f1237432522febaf3a0c"}, + {file = "simplejson-3.20.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:71b75d448fd0ceb2e7c90e72bb82c41f8462550d48529980bc0bab1d2495bfbb"}, + {file = "simplejson-3.20.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a7e15b716d09f318c8cda3e20f82fae81684ce3d3acd1d7770fa3007df1769de"}, + {file = "simplejson-3.20.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:3e7963197d958fcf9e98b212b80977d56c022384621ff463d98afc3b6b1ce7e8"}, + {file = "simplejson-3.20.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:2e671dd62051129185d3a9a92c60101f56cbc174854a1a3dfb69114ebd9e1699"}, + {file = "simplejson-3.20.1-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:e25b2a0c396f3b84fb89573d07b0e1846ed563eb364f2ea8230ca92b8a8cb786"}, + {file = "simplejson-3.20.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:489c3a43116082bad56795215786313832ba3991cca1f55838e52a553f451ab6"}, + {file = "simplejson-3.20.1-cp38-cp38-win32.whl", hash = "sha256:4a92e948bad8df7fa900ba2ba0667a98303f3db206cbaac574935c332838208e"}, + {file = "simplejson-3.20.1-cp38-cp38-win_amd64.whl", hash = "sha256:49d059b8363327eee3c94799dd96782314b2dbd7bcc293b4ad48db69d6f4d362"}, + {file = "simplejson-3.20.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:a8011f1dd1d676befcd4d675ebdbfdbbefd3bf350052b956ba8c699fca7d8cef"}, + {file = "simplejson-3.20.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e91703a4c5fec53e36875ae426ad785f4120bd1d93b65bed4752eeccd1789e0c"}, + {file = "simplejson-3.20.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e39eaa57c7757daa25bcd21f976c46be443b73dd6c3da47fe5ce7b7048ccefe2"}, + {file = "simplejson-3.20.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ceab2ce2acdc7fbaa433a93006758db6ba9a659e80c4faa13b80b9d2318e9b17"}, + {file = "simplejson-3.20.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6d4f320c33277a5b715db5bf5b10dae10c19076bd6d66c2843e04bd12d1f1ea5"}, + {file = "simplejson-3.20.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2b6436c48e64378fa844d8c9e58a5ed0352bbcfd4028369a9b46679b7ab79d2d"}, + {file = "simplejson-3.20.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e18345c8dda5d699be8166b61f9d80aaee4545b709f1363f60813dc032dac53"}, + {file = "simplejson-3.20.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:90b573693d1526bed576f6817e2a492eaaef68f088b57d7a9e83d122bbb49e51"}, + {file = "simplejson-3.20.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:272cc767826e924a6bd369ea3dbf18e166ded29059c7a4d64d21a9a22424b5b5"}, + {file = "simplejson-3.20.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:51b41f284d603c4380732d7d619f8b34bd04bc4aa0ed0ed5f4ffd0539b14da44"}, + {file = "simplejson-3.20.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:6e6697a3067d281f01de0fe96fc7cba4ea870d96d7deb7bfcf85186d74456503"}, + {file = "simplejson-3.20.1-cp39-cp39-win32.whl", hash = "sha256:6dd3a1d5aca87bf947f3339b0f8e8e329f1badf548bdbff37fac63c17936da8e"}, + {file = "simplejson-3.20.1-cp39-cp39-win_amd64.whl", hash = "sha256:463f1fca8fbf23d088e5850fdd0dd4d5faea8900a9f9680270bd98fd649814ca"}, + {file = 
"simplejson-3.20.1-py3-none-any.whl", hash = "sha256:8a6c1bbac39fa4a79f83cbf1df6ccd8ff7069582a9fd8db1e52cea073bc2c697"}, + {file = "simplejson-3.20.1.tar.gz", hash = "sha256:e64139b4ec4f1f24c142ff7dcafe55a22b811a74d86d66560c8815687143037d"}, +] + +[[package]] +name = "six" +version = "1.17.0" +description = "Python 2 and 3 compatibility utilities" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +groups = ["main"] +files = [ + {file = "six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274"}, + {file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"}, +] + +[[package]] +name = "sniffio" +version = "1.3.1" +description = "Sniff out which async library your code is running under" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, + {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, +] + +[[package]] +name = "snowflake-id" +version = "1.0.2" +description = "The Snowflake generator done right" +optional = false +python-versions = "<4.0,>=3.8" +groups = ["main"] +files = [ + {file = "snowflake_id-1.0.2-py3-none-any.whl", hash = "sha256:c3142752ed334b9d4632739a704461d13e97aa6fa71b65d5329829b4b5cc058c"}, + {file = "snowflake_id-1.0.2.tar.gz", hash = "sha256:4a4cd49508599aaa16488311f4b9121e9ac3bac57edff3e9f71b1d79086dbfe8"}, +] + +[[package]] +name = "sortedcontainers" +version = "2.4.0" +description = "Sorted Containers -- Sorted List, Sorted Dict, Sorted Set" +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "sortedcontainers-2.4.0-py2.py3-none-any.whl", hash = "sha256:a163dcaede0f1c021485e957a39245190e74249897e2ae4b2aa38595db237ee0"}, + {file = "sortedcontainers-2.4.0.tar.gz", hash = "sha256:25caa5a06cc30b6b83d11423433f65d1f9d76c4c6a0c90e3379eaa43b9bfdb88"}, +] + +[[package]] +name = "starlette" +version = "0.41.3" +description = "The little ASGI library that shines." 
+optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "starlette-0.41.3-py3-none-any.whl", hash = "sha256:44cedb2b7c77a9de33a8b74b2b90e9f50d11fcf25d8270ea525ad71a25374ff7"}, + {file = "starlette-0.41.3.tar.gz", hash = "sha256:0e4ab3d16522a255be6b28260b938eae2482f98ce5cc934cb08dce8dc3ba5835"}, +] + +[package.dependencies] +anyio = ">=3.4.0,<5" +typing-extensions = {version = ">=3.10.0", markers = "python_version < \"3.10\""} + +[package.extras] +full = ["httpx (>=0.22.0)", "itsdangerous", "jinja2", "python-multipart (>=0.0.7)", "pyyaml"] + +[[package]] +name = "sympy" +version = "1.13.3" +description = "Computer algebra system (CAS) in Python" +optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "python_version < \"3.10\"" +files = [ + {file = "sympy-1.13.3-py3-none-any.whl", hash = "sha256:54612cf55a62755ee71824ce692986f23c88ffa77207b30c1368eda4a7060f73"}, + {file = "sympy-1.13.3.tar.gz", hash = "sha256:b27fd2c6530e0ab39e275fc9b683895367e51d5da91baa8d3d64db2565fec4d9"}, +] + +[package.dependencies] +mpmath = ">=1.1.0,<1.4" + +[package.extras] +dev = ["hypothesis (>=6.70.0)", "pytest (>=7.1.0)"] + +[[package]] +name = "sympy" +version = "1.14.0" +description = "Computer algebra system (CAS) in Python" +optional = false +python-versions = ">=3.9" +groups = ["main"] +markers = "python_version >= \"3.10\"" +files = [ + {file = "sympy-1.14.0-py3-none-any.whl", hash = "sha256:e091cc3e99d2141a0ba2847328f5479b05d94a6635cb96148ccb3f34671bd8f5"}, + {file = "sympy-1.14.0.tar.gz", hash = "sha256:d3d3fe8df1e5a0b42f0e7bdf50541697dbe7d23746e894990c030e2b05e72517"}, +] + +[package.dependencies] +mpmath = ">=1.1.0,<1.4" + +[package.extras] +dev = ["hypothesis (>=6.70.0)", "pytest (>=7.1.0)"] + +[[package]] +name = "tenacity" +version = "9.0.0" +description = "Retry code until it succeeds" +optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "python_version < \"3.10\"" +files = [ + {file = "tenacity-9.0.0-py3-none-any.whl", hash = "sha256:93de0c98785b27fcf659856aa9f54bfbd399e29969b0621bc7f762bd441b4539"}, + {file = "tenacity-9.0.0.tar.gz", hash = "sha256:807f37ca97d62aa361264d497b0e31e92b8027044942bfa756160d908320d73b"}, +] + +[package.extras] +doc = ["reno", "sphinx"] +test = ["pytest", "tornado (>=4.5)", "typeguard"] + +[[package]] +name = "tenacity" +version = "9.1.2" +description = "Retry code until it succeeds" +optional = false +python-versions = ">=3.9" +groups = ["main"] +markers = "python_version >= \"3.10\"" +files = [ + {file = "tenacity-9.1.2-py3-none-any.whl", hash = "sha256:f77bf36710d8b73a50b2dd155c97b870017ad21afe6ab300326b0371b3b05138"}, + {file = "tenacity-9.1.2.tar.gz", hash = "sha256:1169d376c297e7de388d18b4481760d478b0e99a777cad3a9c86e556f4b697cb"}, +] + +[package.extras] +doc = ["reno", "sphinx"] +test = ["pytest", "tornado (>=4.5)", "typeguard"] + +[[package]] +name = "tokenizers" +version = "0.15.2" +description = "" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "tokenizers-0.15.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:52f6130c9cbf70544287575a985bf44ae1bda2da7e8c24e97716080593638012"}, + {file = "tokenizers-0.15.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:054c1cc9c6d68f7ffa4e810b3d5131e0ba511b6e4be34157aa08ee54c2f8d9ee"}, + {file = "tokenizers-0.15.2-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:a9b9b070fdad06e347563b88c278995735292ded1132f8657084989a4c84a6d5"}, + {file = 
"tokenizers-0.15.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ea621a7eef4b70e1f7a4e84dd989ae3f0eeb50fc8690254eacc08acb623e82f1"}, + {file = "tokenizers-0.15.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:cf7fd9a5141634fa3aa8d6b7be362e6ae1b4cda60da81388fa533e0b552c98fd"}, + {file = "tokenizers-0.15.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:44f2a832cd0825295f7179eaf173381dc45230f9227ec4b44378322d900447c9"}, + {file = "tokenizers-0.15.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8b9ec69247a23747669ec4b0ca10f8e3dfb3545d550258129bd62291aabe8605"}, + {file = "tokenizers-0.15.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40b6a4c78da863ff26dbd5ad9a8ecc33d8a8d97b535172601cf00aee9d7ce9ce"}, + {file = "tokenizers-0.15.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:5ab2a4d21dcf76af60e05af8063138849eb1d6553a0d059f6534357bce8ba364"}, + {file = "tokenizers-0.15.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a47acfac7e511f6bbfcf2d3fb8c26979c780a91e06fb5b9a43831b2c0153d024"}, + {file = "tokenizers-0.15.2-cp310-none-win32.whl", hash = "sha256:064ff87bb6acdbd693666de9a4b692add41308a2c0ec0770d6385737117215f2"}, + {file = "tokenizers-0.15.2-cp310-none-win_amd64.whl", hash = "sha256:3b919afe4df7eb6ac7cafd2bd14fb507d3f408db7a68c43117f579c984a73843"}, + {file = "tokenizers-0.15.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:89cd1cb93e4b12ff39bb2d626ad77e35209de9309a71e4d3d4672667b4b256e7"}, + {file = "tokenizers-0.15.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:cfed5c64e5be23d7ee0f0e98081a25c2a46b0b77ce99a4f0605b1ec43dd481fa"}, + {file = "tokenizers-0.15.2-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:a907d76dcfda37023ba203ab4ceeb21bc5683436ebefbd895a0841fd52f6f6f2"}, + {file = "tokenizers-0.15.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20ea60479de6fc7b8ae756b4b097572372d7e4032e2521c1bbf3d90c90a99ff0"}, + {file = "tokenizers-0.15.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:48e2b9335be2bc0171df9281385c2ed06a15f5cf121c44094338306ab7b33f2c"}, + {file = "tokenizers-0.15.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:112a1dd436d2cc06e6ffdc0b06d55ac019a35a63afd26475205cb4b1bf0bfbff"}, + {file = "tokenizers-0.15.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4620cca5c2817177ee8706f860364cc3a8845bc1e291aaf661fb899e5d1c45b0"}, + {file = "tokenizers-0.15.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ccd73a82751c523b3fc31ff8194702e4af4db21dc20e55b30ecc2079c5d43cb7"}, + {file = "tokenizers-0.15.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:107089f135b4ae7817affe6264f8c7a5c5b4fd9a90f9439ed495f54fcea56fb4"}, + {file = "tokenizers-0.15.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0ff110ecc57b7aa4a594396525a3451ad70988e517237fe91c540997c4e50e29"}, + {file = "tokenizers-0.15.2-cp311-none-win32.whl", hash = "sha256:6d76f00f5c32da36c61f41c58346a4fa7f0a61be02f4301fd30ad59834977cc3"}, + {file = "tokenizers-0.15.2-cp311-none-win_amd64.whl", hash = "sha256:cc90102ed17271cf0a1262babe5939e0134b3890345d11a19c3145184b706055"}, + {file = "tokenizers-0.15.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f86593c18d2e6248e72fb91c77d413a815153b8ea4e31f7cd443bdf28e467670"}, + {file = 
"tokenizers-0.15.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0774bccc6608eca23eb9d620196687c8b2360624619623cf4ba9dc9bd53e8b51"}, + {file = "tokenizers-0.15.2-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:d0222c5b7c9b26c0b4822a82f6a7011de0a9d3060e1da176f66274b70f846b98"}, + {file = "tokenizers-0.15.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3835738be1de66624fff2f4f6f6684775da4e9c00bde053be7564cbf3545cc66"}, + {file = "tokenizers-0.15.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0143e7d9dcd811855c1ce1ab9bf5d96d29bf5e528fd6c7824d0465741e8c10fd"}, + {file = "tokenizers-0.15.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:db35825f6d54215f6b6009a7ff3eedee0848c99a6271c870d2826fbbedf31a38"}, + {file = "tokenizers-0.15.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3f5e64b0389a2be47091d8cc53c87859783b837ea1a06edd9d8e04004df55a5c"}, + {file = "tokenizers-0.15.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e0480c452217edd35eca56fafe2029fb4d368b7c0475f8dfa3c5c9c400a7456"}, + {file = "tokenizers-0.15.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:a33ab881c8fe70474980577e033d0bc9a27b7ab8272896e500708b212995d834"}, + {file = "tokenizers-0.15.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a308a607ca9de2c64c1b9ba79ec9a403969715a1b8ba5f998a676826f1a7039d"}, + {file = "tokenizers-0.15.2-cp312-none-win32.whl", hash = "sha256:b8fcfa81bcb9447df582c5bc96a031e6df4da2a774b8080d4f02c0c16b42be0b"}, + {file = "tokenizers-0.15.2-cp312-none-win_amd64.whl", hash = "sha256:38d7ab43c6825abfc0b661d95f39c7f8af2449364f01d331f3b51c94dcff7221"}, + {file = "tokenizers-0.15.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:38bfb0204ff3246ca4d5e726e8cc8403bfc931090151e6eede54d0e0cf162ef0"}, + {file = "tokenizers-0.15.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9c861d35e8286a53e06e9e28d030b5a05bcbf5ac9d7229e561e53c352a85b1fc"}, + {file = "tokenizers-0.15.2-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:936bf3842db5b2048eaa53dade907b1160f318e7c90c74bfab86f1e47720bdd6"}, + {file = "tokenizers-0.15.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:620beacc3373277700d0e27718aa8b25f7b383eb8001fba94ee00aeea1459d89"}, + {file = "tokenizers-0.15.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2735ecbbf37e52db4ea970e539fd2d450d213517b77745114f92867f3fc246eb"}, + {file = "tokenizers-0.15.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:473c83c5e2359bb81b0b6fde870b41b2764fcdd36d997485e07e72cc3a62264a"}, + {file = "tokenizers-0.15.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:968fa1fb3c27398b28a4eca1cbd1e19355c4d3a6007f7398d48826bbe3a0f728"}, + {file = "tokenizers-0.15.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:865c60ae6eaebdde7da66191ee9b7db52e542ed8ee9d2c653b6d190a9351b980"}, + {file = "tokenizers-0.15.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7c0d8b52664ab2d4a8d6686eb5effc68b78608a9008f086a122a7b2996befbab"}, + {file = "tokenizers-0.15.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:f33dfbdec3784093a9aebb3680d1f91336c56d86cc70ddf88708251da1fe9064"}, + {file = "tokenizers-0.15.2-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:d44ba80988ff9424e33e0a49445072ac7029d8c0e1601ad25a0ca5f41ed0c1d6"}, + {file = 
"tokenizers-0.15.2-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:dce74266919b892f82b1b86025a613956ea0ea62a4843d4c4237be2c5498ed3a"}, + {file = "tokenizers-0.15.2-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:0ef06b9707baeb98b316577acb04f4852239d856b93e9ec3a299622f6084e4be"}, + {file = "tokenizers-0.15.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c73e2e74bbb07910da0d37c326869f34113137b23eadad3fc00856e6b3d9930c"}, + {file = "tokenizers-0.15.2-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4eeb12daf02a59e29f578a865f55d87cd103ce62bd8a3a5874f8fdeaa82e336b"}, + {file = "tokenizers-0.15.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9ba9f6895af58487ca4f54e8a664a322f16c26bbb442effd01087eba391a719e"}, + {file = "tokenizers-0.15.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ccec77aa7150e38eec6878a493bf8c263ff1fa8a62404e16c6203c64c1f16a26"}, + {file = "tokenizers-0.15.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3f40604f5042ff210ba82743dda2b6aa3e55aa12df4e9f2378ee01a17e2855e"}, + {file = "tokenizers-0.15.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:5645938a42d78c4885086767c70923abad047163d809c16da75d6b290cb30bbe"}, + {file = "tokenizers-0.15.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:05a77cbfebe28a61ab5c3891f9939cc24798b63fa236d84e5f29f3a85a200c00"}, + {file = "tokenizers-0.15.2-cp37-none-win32.whl", hash = "sha256:361abdc068e8afe9c5b818769a48624687fb6aaed49636ee39bec4e95e1a215b"}, + {file = "tokenizers-0.15.2-cp37-none-win_amd64.whl", hash = "sha256:7ef789f83eb0f9baeb4d09a86cd639c0a5518528f9992f38b28e819df397eb06"}, + {file = "tokenizers-0.15.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:4fe1f74a902bee74a3b25aff180fbfbf4f8b444ab37c4d496af7afd13a784ed2"}, + {file = "tokenizers-0.15.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4c4b89038a684f40a6b15d6b09f49650ac64d951ad0f2a3ea9169687bbf2a8ba"}, + {file = "tokenizers-0.15.2-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:d05a1b06f986d41aed5f2de464c003004b2df8aaf66f2b7628254bcbfb72a438"}, + {file = "tokenizers-0.15.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:508711a108684111ec8af89d3a9e9e08755247eda27d0ba5e3c50e9da1600f6d"}, + {file = "tokenizers-0.15.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:daa348f02d15160cb35439098ac96e3a53bacf35885072611cd9e5be7d333daa"}, + {file = "tokenizers-0.15.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:494fdbe5932d3416de2a85fc2470b797e6f3226c12845cadf054dd906afd0442"}, + {file = "tokenizers-0.15.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c2d60f5246f4da9373f75ff18d64c69cbf60c3bca597290cea01059c336d2470"}, + {file = "tokenizers-0.15.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93268e788825f52de4c7bdcb6ebc1fcd4a5442c02e730faa9b6b08f23ead0e24"}, + {file = "tokenizers-0.15.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6fc7083ab404019fc9acafe78662c192673c1e696bd598d16dc005bd663a5cf9"}, + {file = "tokenizers-0.15.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:41e39b41e5531d6b2122a77532dbea60e171ef87a3820b5a3888daa847df4153"}, + {file = "tokenizers-0.15.2-cp38-none-win32.whl", hash = "sha256:06cd0487b1cbfabefb2cc52fbd6b1f8d4c37799bd6c6e1641281adaa6b2504a7"}, + {file = "tokenizers-0.15.2-cp38-none-win_amd64.whl", hash = 
"sha256:5179c271aa5de9c71712e31cb5a79e436ecd0d7532a408fa42a8dbfa4bc23fd9"}, + {file = "tokenizers-0.15.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:82f8652a74cc107052328b87ea8b34291c0f55b96d8fb261b3880216a9f9e48e"}, + {file = "tokenizers-0.15.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:02458bee6f5f3139f1ebbb6d042b283af712c0981f5bc50edf771d6b762d5e4f"}, + {file = "tokenizers-0.15.2-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:c9a09cd26cca2e1c349f91aa665309ddb48d71636370749414fbf67bc83c5343"}, + {file = "tokenizers-0.15.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:158be8ea8554e5ed69acc1ce3fbb23a06060bd4bbb09029431ad6b9a466a7121"}, + {file = "tokenizers-0.15.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1ddba9a2b0c8c81633eca0bb2e1aa5b3a15362b1277f1ae64176d0f6eba78ab1"}, + {file = "tokenizers-0.15.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3ef5dd1d39797044642dbe53eb2bc56435308432e9c7907728da74c69ee2adca"}, + {file = "tokenizers-0.15.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:454c203164e07a860dbeb3b1f4a733be52b0edbb4dd2e5bd75023ffa8b49403a"}, + {file = "tokenizers-0.15.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0cf6b7f1d4dc59af960e6ffdc4faffe6460bbfa8dce27a58bf75755ffdb2526d"}, + {file = "tokenizers-0.15.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:2ef09bbc16519f6c25d0c7fc0c6a33a6f62923e263c9d7cca4e58b8c61572afb"}, + {file = "tokenizers-0.15.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c9a2ebdd2ad4ec7a68e7615086e633857c85e2f18025bd05d2a4399e6c5f7169"}, + {file = "tokenizers-0.15.2-cp39-none-win32.whl", hash = "sha256:918fbb0eab96fe08e72a8c2b5461e9cce95585d82a58688e7f01c2bd546c79d0"}, + {file = "tokenizers-0.15.2-cp39-none-win_amd64.whl", hash = "sha256:524e60da0135e106b254bd71f0659be9f89d83f006ea9093ce4d1fab498c6d0d"}, + {file = "tokenizers-0.15.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:6a9b648a58281c4672212fab04e60648fde574877d0139cd4b4f93fe28ca8944"}, + {file = "tokenizers-0.15.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:7c7d18b733be6bbca8a55084027f7be428c947ddf871c500ee603e375013ffba"}, + {file = "tokenizers-0.15.2-pp310-pypy310_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:13ca3611de8d9ddfbc4dc39ef54ab1d2d4aaa114ac8727dfdc6a6ec4be017378"}, + {file = "tokenizers-0.15.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:237d1bf3361cf2e6463e6c140628e6406766e8b27274f5fcc62c747ae3c6f094"}, + {file = "tokenizers-0.15.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67a0fe1e49e60c664915e9fb6b0cb19bac082ab1f309188230e4b2920230edb3"}, + {file = "tokenizers-0.15.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:4e022fe65e99230b8fd89ebdfea138c24421f91c1a4f4781a8f5016fd5cdfb4d"}, + {file = "tokenizers-0.15.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:d857be2df69763362ac699f8b251a8cd3fac9d21893de129bc788f8baaef2693"}, + {file = "tokenizers-0.15.2-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:708bb3e4283177236309e698da5fcd0879ce8fd37457d7c266d16b550bcbbd18"}, + {file = "tokenizers-0.15.2-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:64c35e09e9899b72a76e762f9854e8750213f67567787d45f37ce06daf57ca78"}, + {file = 
"tokenizers-0.15.2-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c1257f4394be0d3b00de8c9e840ca5601d0a4a8438361ce9c2b05c7d25f6057b"}, + {file = "tokenizers-0.15.2-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02272fe48280e0293a04245ca5d919b2c94a48b408b55e858feae9618138aeda"}, + {file = "tokenizers-0.15.2-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:dc3ad9ebc76eabe8b1d7c04d38be884b8f9d60c0cdc09b0aa4e3bcf746de0388"}, + {file = "tokenizers-0.15.2-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:32e16bdeffa7c4f46bf2152172ca511808b952701d13e7c18833c0b73cb5c23f"}, + {file = "tokenizers-0.15.2-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:fb16ba563d59003028b678d2361a27f7e4ae0ab29c7a80690efa20d829c81fdb"}, + {file = "tokenizers-0.15.2-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:2277c36d2d6cdb7876c274547921a42425b6810d38354327dd65a8009acf870c"}, + {file = "tokenizers-0.15.2-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:1cf75d32e8d250781940d07f7eece253f2fe9ecdb1dc7ba6e3833fa17b82fcbc"}, + {file = "tokenizers-0.15.2-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1b3b31884dc8e9b21508bb76da80ebf7308fdb947a17affce815665d5c4d028"}, + {file = "tokenizers-0.15.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b10122d8d8e30afb43bb1fe21a3619f62c3e2574bff2699cf8af8b0b6c5dc4a3"}, + {file = "tokenizers-0.15.2-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d88b96ff0fe8e91f6ef01ba50b0d71db5017fa4e3b1d99681cec89a85faf7bf7"}, + {file = "tokenizers-0.15.2-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:37aaec5a52e959892870a7c47cef80c53797c0db9149d458460f4f31e2fb250e"}, + {file = "tokenizers-0.15.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e2ea752f2b0fe96eb6e2f3adbbf4d72aaa1272079b0dfa1145507bd6a5d537e6"}, + {file = "tokenizers-0.15.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:4b19a808d8799fda23504a5cd31d2f58e6f52f140380082b352f877017d6342b"}, + {file = "tokenizers-0.15.2-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:64c86e5e068ac8b19204419ed8ca90f9d25db20578f5881e337d203b314f4104"}, + {file = "tokenizers-0.15.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:de19c4dc503c612847edf833c82e9f73cd79926a384af9d801dcf93f110cea4e"}, + {file = "tokenizers-0.15.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ea09acd2fe3324174063d61ad620dec3bcf042b495515f27f638270a7d466e8b"}, + {file = "tokenizers-0.15.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:cf27fd43472e07b57cf420eee1e814549203d56de00b5af8659cb99885472f1f"}, + {file = "tokenizers-0.15.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:7ca22bd897537a0080521445d91a58886c8c04084a6a19e6c78c586e0cfa92a5"}, + {file = "tokenizers-0.15.2.tar.gz", hash = "sha256:e6e9c6e019dd5484be5beafc775ae6c925f4c69a3487040ed09b45e13df2cb91"}, +] + +[package.dependencies] +huggingface_hub = ">=0.16.4,<1.0" + +[package.extras] +dev = ["tokenizers[testing]"] +docs = ["setuptools_rust", "sphinx", "sphinx_rtd_theme"] +testing = ["black (==22.3)", "datasets", "numpy", "pytest", "requests"] + +[[package]] +name = "tomli" +version = "2.2.1" +description = "A lil' TOML parser" +optional = false +python-versions = ">=3.8" +groups = ["main", "dev"] +markers = "python_version <= \"3.10\"" +files = [ + {file = 
"tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"}, + {file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"}, + {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a"}, + {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee"}, + {file = "tomli-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e"}, + {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4"}, + {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106"}, + {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8"}, + {file = "tomli-2.2.1-cp311-cp311-win32.whl", hash = "sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff"}, + {file = "tomli-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b"}, + {file = "tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea"}, + {file = "tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8"}, + {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192"}, + {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222"}, + {file = "tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77"}, + {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6"}, + {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd"}, + {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e"}, + {file = "tomli-2.2.1-cp312-cp312-win32.whl", hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98"}, + {file = "tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4"}, + {file = "tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7"}, + {file = "tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c"}, + {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13"}, + {file = 
"tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281"}, + {file = "tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272"}, + {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140"}, + {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2"}, + {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744"}, + {file = "tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec"}, + {file = "tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69"}, + {file = "tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc"}, + {file = "tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff"}, +] + +[[package]] +name = "torch" +version = "2.1.1" +description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration" +optional = false +python-versions = ">=3.8.0" +groups = ["main"] +files = [ + {file = "torch-2.1.1-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:5ebc43f5355a9b7be813392b3fb0133991f0380f6f0fcc8218d5468dc45d1071"}, + {file = "torch-2.1.1-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:84fefd63356416c0cd20578637ccdbb82164993400ed17b57c951dd6376dcee8"}, + {file = "torch-2.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:0a7a9da0c324409bcb5a7bdad1b4e94e936d21c2590aaa7ac2f63968da8c62f7"}, + {file = "torch-2.1.1-cp310-none-macosx_10_9_x86_64.whl", hash = "sha256:1e1e5faddd43a8f2c0e0e22beacd1e235a2e447794d807483c94a9e31b54a758"}, + {file = "torch-2.1.1-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:e76bf3c5c354874f1da465c852a2fb60ee6cbce306e935337885760f080f9baa"}, + {file = "torch-2.1.1-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:98fea993639b0bb432dfceb7b538f07c0f1c33386d63f635219f49254968c80f"}, + {file = "torch-2.1.1-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:61b51b33c61737c287058b0c3061e6a9d3c363863e4a094f804bc486888a188a"}, + {file = "torch-2.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:1d70920da827e2276bf07f7ec46958621cad18d228c97da8f9c19638474dbd52"}, + {file = "torch-2.1.1-cp311-none-macosx_10_9_x86_64.whl", hash = "sha256:a70593806f1d7e6b53657d96810518da0f88ef2608c98a402955765b8c79d52c"}, + {file = "torch-2.1.1-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:e312f7e82e49565f7667b0bbf9559ab0c597063d93044740781c02acd5a87978"}, + {file = "torch-2.1.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:1e3cbecfa5a7314d828f4a37b0c286714dc9aa2e69beb7a22f7aca76567ed9f4"}, + {file = "torch-2.1.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:9ca0fcbf3d5ba644d6a8572c83a9abbdf5f7ff575bc38529ef6c185a3a71bde9"}, + {file = "torch-2.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:2dc9f312fc1fa0d61a565a0292ad73119d4b74c9f8b5031b55f8b4722abca079"}, + {file = "torch-2.1.1-cp38-none-macosx_10_9_x86_64.whl", hash = "sha256:d56b032176458e2af4709627bbd2c20fe2917eff8cd087a7fe313acccf5ce2f1"}, + {file = "torch-2.1.1-cp38-none-macosx_11_0_arm64.whl", hash = 
"sha256:29e3b90a8c281f6660804a939d1f4218604c80162e521e1e6d8c8557325902a0"}, + {file = "torch-2.1.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:bd95cee8511584b67ddc0ba465c3f1edeb5708d833ee02af1206b4486f1d9096"}, + {file = "torch-2.1.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:b31230bd058424e56dba7f899280dbc6ac8b9948e43902e0c84a44666b1ec151"}, + {file = "torch-2.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:403f1095e665e4f35971b43797a920725b8b205723aa68254a4050c6beca29b6"}, + {file = "torch-2.1.1-cp39-none-macosx_10_9_x86_64.whl", hash = "sha256:715b50d8c1de5da5524a68287eb000f73e026e74d5f6b12bc450ef6995fcf5f9"}, + {file = "torch-2.1.1-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:db67e8725c76f4c7f4f02e7551bb16e81ba1a1912867bc35d7bb96d2be8c78b4"}, +] + +[package.dependencies] +filelock = "*" +fsspec = "*" +jinja2 = "*" +networkx = "*" +nvidia-cublas-cu12 = {version = "12.1.3.1", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cuda-cupti-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cuda-nvrtc-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cuda-runtime-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cudnn-cu12 = {version = "8.9.2.26", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cufft-cu12 = {version = "11.0.2.54", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-curand-cu12 = {version = "10.3.2.106", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cusolver-cu12 = {version = "11.4.5.107", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cusparse-cu12 = {version = "12.1.0.106", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-nccl-cu12 = {version = "2.18.1", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-nvtx-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +sympy = "*" +triton = {version = "2.1.0", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +typing-extensions = "*" + +[package.extras] +dynamo = ["jinja2"] +opt-einsum = ["opt-einsum (>=3.3)"] + +[[package]] +name = "tqdm" +version = "4.67.1" +description = "Fast, Extensible Progress Meter" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2"}, + {file = "tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[package.extras] +dev = ["nbval", "pytest (>=6)", "pytest-asyncio (>=0.24)", "pytest-cov", "pytest-timeout"] +discord = ["requests"] +notebook = ["ipywidgets (>=6)"] +slack = ["slack-sdk"] +telegram = ["requests"] + +[[package]] +name = "transformers" +version = "4.38.2" +description = "State-of-the-art Machine Learning for JAX, PyTorch and TensorFlow" +optional = false +python-versions = ">=3.8.0" +groups = ["main"] +files = [ + {file = "transformers-4.38.2-py3-none-any.whl", hash = "sha256:c4029cb9f01b3dd335e52f364c52d2b37c65b4c78e02e6a08b1919c5c928573e"}, + {file = 
"transformers-4.38.2.tar.gz", hash = "sha256:c5fc7ad682b8a50a48b2a4c05d4ea2de5567adb1bdd00053619dbe5960857dd5"}, +] + +[package.dependencies] +filelock = "*" +huggingface-hub = ">=0.19.3,<1.0" +numpy = ">=1.17" +packaging = ">=20.0" +pyyaml = ">=5.1" +regex = "!=2019.12.17" +requests = "*" +safetensors = ">=0.4.1" +tokenizers = ">=0.14,<0.19" +tqdm = ">=4.27" + +[package.extras] +accelerate = ["accelerate (>=0.21.0)"] +agents = ["Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "datasets (!=2.5.0)", "diffusers", "opencv-python", "sentencepiece (>=0.1.91,!=0.1.92)", "torch"] +all = ["Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "av (==9.2.0)", "codecarbon (==1.2.0)", "decord (==0.6.0)", "flax (>=0.4.1,<=0.7.0)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "phonemizer", "protobuf", "pyctcdecode (>=0.4.0)", "ray[tune] (>=2.7.0)", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "tensorflow (>=2.6,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timm", "tokenizers (>=0.14,<0.19)", "torch", "torchaudio", "torchvision"] +audio = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] +codecarbon = ["codecarbon (==1.2.0)"] +deepspeed = ["accelerate (>=0.21.0)", "deepspeed (>=0.9.3)"] +deepspeed-testing = ["GitPython (<3.1.19)", "accelerate (>=0.21.0)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "deepspeed (>=0.9.3)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder (>=0.3.0)", "nltk", "optuna", "parameterized", "protobuf", "psutil", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.1.5)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "timeout-decorator"] +dev = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "av (==9.2.0)", "beautifulsoup4", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "decord (==0.6.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "flax (>=0.4.1,<=0.7.0)", "fugashi (>=1.0)", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "nltk", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-timeout", "pytest-xdist", "ray[tune] (>=2.7.0)", "rhoknp (>=1.1.0,<1.3.1)", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.1.5)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorboard", "tensorflow (>=2.6,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timeout-decorator", "timm", "tokenizers (>=0.14,<0.19)", "torch", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"] +dev-tensorflow = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "isort (>=5.5.4)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "nltk", "onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "parameterized", "phonemizer", 
"protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.1.5)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "tensorflow (>=2.6,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timeout-decorator", "tokenizers (>=0.14,<0.19)", "urllib3 (<2.0.0)"] +dev-torch = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "beautifulsoup4", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "fugashi (>=1.0)", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "kenlm", "librosa", "nltk", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-timeout", "pytest-xdist", "ray[tune] (>=2.7.0)", "rhoknp (>=1.1.0,<1.3.1)", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.1.5)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorboard", "timeout-decorator", "timm", "tokenizers (>=0.14,<0.19)", "torch", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"] +docs = ["Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "av (==9.2.0)", "codecarbon (==1.2.0)", "decord (==0.6.0)", "flax (>=0.4.1,<=0.7.0)", "hf-doc-builder", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "phonemizer", "protobuf", "pyctcdecode (>=0.4.0)", "ray[tune] (>=2.7.0)", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "tensorflow (>=2.6,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timm", "tokenizers (>=0.14,<0.19)", "torch", "torchaudio", "torchvision"] +docs-specific = ["hf-doc-builder"] +flax = ["flax (>=0.4.1,<=0.7.0)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "optax (>=0.0.8,<=0.1.4)"] +flax-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] +ftfy = ["ftfy"] +integrations = ["optuna", "ray[tune] (>=2.7.0)", "sigopt"] +ja = ["fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "rhoknp (>=1.1.0,<1.3.1)", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)"] +modelcreation = ["cookiecutter (==1.7.3)"] +natten = ["natten (>=0.14.6,<0.15.0)"] +onnx = ["onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "tf2onnx"] +onnxruntime = ["onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)"] +optuna = ["optuna"] +quality = ["GitPython (<3.1.19)", "datasets (!=2.5.0)", "hf-doc-builder (>=0.3.0)", "isort (>=5.5.4)", "ruff (==0.1.5)", "urllib3 (<2.0.0)"] +ray = ["ray[tune] (>=2.7.0)"] +retrieval = ["datasets (!=2.5.0)", "faiss-cpu"] +sagemaker = ["sagemaker (>=2.31.0)"] +sentencepiece = ["protobuf", "sentencepiece (>=0.1.91,!=0.1.92)"] +serving = ["fastapi", "pydantic", "starlette", "uvicorn"] +sigopt = ["sigopt"] +sklearn = ["scikit-learn"] +speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)", "torchaudio"] +testing = ["GitPython (<3.1.19)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder (>=0.3.0)", "nltk", 
"parameterized", "protobuf", "psutil", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.1.5)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "tensorboard", "timeout-decorator"] +tf = ["keras-nlp (>=0.3.1)", "onnxconverter-common", "tensorflow (>=2.6,<2.16)", "tensorflow-text (<2.16)", "tf2onnx"] +tf-cpu = ["keras-nlp (>=0.3.1)", "onnxconverter-common", "tensorflow-cpu (>=2.6,<2.16)", "tensorflow-text (<2.16)", "tf2onnx"] +tf-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] +timm = ["timm"] +tokenizers = ["tokenizers (>=0.14,<0.19)"] +torch = ["accelerate (>=0.21.0)", "torch"] +torch-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)", "torchaudio"] +torch-vision = ["Pillow (>=10.0.1,<=15.0)", "torchvision"] +torchhub = ["filelock", "huggingface-hub (>=0.19.3,<1.0)", "importlib-metadata", "numpy (>=1.17)", "packaging (>=20.0)", "protobuf", "regex (!=2019.12.17)", "requests", "sentencepiece (>=0.1.91,!=0.1.92)", "tokenizers (>=0.14,<0.19)", "torch", "tqdm (>=4.27)"] +video = ["av (==9.2.0)", "decord (==0.6.0)"] +vision = ["Pillow (>=10.0.1,<=15.0)"] + +[[package]] +name = "triton" +version = "2.1.0" +description = "A language and compiler for custom Deep Learning operations" +optional = false +python-versions = "*" +groups = ["main"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" +files = [ + {file = "triton-2.1.0-0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:66439923a30d5d48399b08a9eae10370f6c261a5ec864a64983bae63152d39d7"}, + {file = "triton-2.1.0-0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:919b06453f0033ea52c13eaf7833de0e57db3178d23d4e04f9fc71c4f2c32bf8"}, + {file = "triton-2.1.0-0-cp37-cp37m-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ae4bb8a91de790e1866405211c4d618379781188f40d5c4c399766914e84cd94"}, + {file = "triton-2.1.0-0-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:39f6fb6bdccb3e98f3152e3fbea724f1aeae7d749412bbb1fa9c441d474eba26"}, + {file = "triton-2.1.0-0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:21544e522c02005a626c8ad63d39bdff2f31d41069592919ef281e964ed26446"}, + {file = "triton-2.1.0-0-pp37-pypy37_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:143582ca31dd89cd982bd3bf53666bab1c7527d41e185f9e3d8a3051ce1b663b"}, + {file = "triton-2.1.0-0-pp38-pypy38_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:82fc5aeeedf6e36be4e4530cbdcba81a09d65c18e02f52dc298696d45721f3bd"}, + {file = "triton-2.1.0-0-pp39-pypy39_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:81a96d110a738ff63339fc892ded095b31bd0d205e3aace262af8400d40b6fa8"}, +] + +[package.dependencies] +filelock = "*" + +[package.extras] +build = ["cmake (>=3.18)", "lit"] +tests = ["autopep8", "flake8", "isort", "numpy", "pytest", "scipy (>=1.7.1)"] +tutorials = ["matplotlib", "pandas", "tabulate"] + +[[package]] +name = "typer" +version = "0.16.0" +description = "Typer, build great CLIs. Easy to code. Based on Python type hints." 
+optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "typer-0.16.0-py3-none-any.whl", hash = "sha256:1f79bed11d4d02d4310e3c1b7ba594183bcedb0ac73b27a9e5f28f6fb5b98855"}, + {file = "typer-0.16.0.tar.gz", hash = "sha256:af377ffaee1dbe37ae9440cb4e8f11686ea5ce4e9bae01b84ae7c63b87f1dd3b"}, +] + +[package.dependencies] +click = ">=8.0.0" +rich = ">=10.11.0" +shellingham = ">=1.3.0" +typing-extensions = ">=3.7.4.3" + +[[package]] +name = "typing-extensions" +version = "4.13.2" +description = "Backported and Experimental Type Hints for Python 3.8+" +optional = false +python-versions = ">=3.8" +groups = ["main", "dev"] +markers = "python_version < \"3.10\"" +files = [ + {file = "typing_extensions-4.13.2-py3-none-any.whl", hash = "sha256:a439e7c04b49fec3e5d3e2beaa21755cadbbdc391694e28ccdd36ca4a1408f8c"}, + {file = "typing_extensions-4.13.2.tar.gz", hash = "sha256:e6c81219bd689f51865d9e372991c540bda33a0379d5573cddb9a3a23f7caaef"}, +] + +[[package]] +name = "typing-extensions" +version = "4.14.0" +description = "Backported and Experimental Type Hints for Python 3.9+" +optional = false +python-versions = ">=3.9" +groups = ["main", "dev"] +files = [ + {file = "typing_extensions-4.14.0-py3-none-any.whl", hash = "sha256:a1514509136dd0b477638fc68d6a91497af5076466ad0fa6c338e44e359944af"}, + {file = "typing_extensions-4.14.0.tar.gz", hash = "sha256:8676b788e32f02ab42d9e7c61324048ae4c6d844a399eebace3d4979d75ceef4"}, +] +markers = {main = "python_version >= \"3.10\"", dev = "python_version == \"3.10\""} + +[[package]] +name = "typing-inspection" +version = "0.4.1" +description = "Runtime typing introspection tools" +optional = false +python-versions = ">=3.9" +groups = ["main"] +markers = "python_version >= \"3.10\"" +files = [ + {file = "typing_inspection-0.4.1-py3-none-any.whl", hash = "sha256:389055682238f53b04f7badcb49b989835495a96700ced5dab2d8feae4b26f51"}, + {file = "typing_inspection-0.4.1.tar.gz", hash = "sha256:6ae134cc0203c33377d43188d4064e9b357dba58cff3185f22924610e70a9d28"}, +] + +[package.dependencies] +typing-extensions = ">=4.12.0" + +[[package]] +name = "tzdata" +version = "2025.2" +description = "Provider of IANA time zone data" +optional = false +python-versions = ">=2" +groups = ["main"] +files = [ + {file = "tzdata-2025.2-py2.py3-none-any.whl", hash = "sha256:1a403fada01ff9221ca8044d701868fa132215d84beb92242d9acd2147f667a8"}, + {file = "tzdata-2025.2.tar.gz", hash = "sha256:b60a638fcc0daffadf82fe0f57e53d06bdec2f36c4df66280ae79bce6bd6f2b9"}, +] + +[[package]] +name = "ujson" +version = "5.10.0" +description = "Ultra fast JSON encoder and decoder for Python" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "ujson-5.10.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2601aa9ecdbee1118a1c2065323bda35e2c5a2cf0797ef4522d485f9d3ef65bd"}, + {file = "ujson-5.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:348898dd702fc1c4f1051bc3aacbf894caa0927fe2c53e68679c073375f732cf"}, + {file = "ujson-5.10.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22cffecf73391e8abd65ef5f4e4dd523162a3399d5e84faa6aebbf9583df86d6"}, + {file = "ujson-5.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26b0e2d2366543c1bb4fbd457446f00b0187a2bddf93148ac2da07a53fe51569"}, + {file = "ujson-5.10.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:caf270c6dba1be7a41125cd1e4fc7ba384bf564650beef0df2dd21a00b7f5770"}, + {file = 
"ujson-5.10.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a245d59f2ffe750446292b0094244df163c3dc96b3ce152a2c837a44e7cda9d1"}, + {file = "ujson-5.10.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:94a87f6e151c5f483d7d54ceef83b45d3a9cca7a9cb453dbdbb3f5a6f64033f5"}, + {file = "ujson-5.10.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:29b443c4c0a113bcbb792c88bea67b675c7ca3ca80c3474784e08bba01c18d51"}, + {file = "ujson-5.10.0-cp310-cp310-win32.whl", hash = "sha256:c18610b9ccd2874950faf474692deee4223a994251bc0a083c114671b64e6518"}, + {file = "ujson-5.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:924f7318c31874d6bb44d9ee1900167ca32aa9b69389b98ecbde34c1698a250f"}, + {file = "ujson-5.10.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a5b366812c90e69d0f379a53648be10a5db38f9d4ad212b60af00bd4048d0f00"}, + {file = "ujson-5.10.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:502bf475781e8167f0f9d0e41cd32879d120a524b22358e7f205294224c71126"}, + {file = "ujson-5.10.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5b91b5d0d9d283e085e821651184a647699430705b15bf274c7896f23fe9c9d8"}, + {file = "ujson-5.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:129e39af3a6d85b9c26d5577169c21d53821d8cf68e079060602e861c6e5da1b"}, + {file = "ujson-5.10.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f77b74475c462cb8b88680471193064d3e715c7c6074b1c8c412cb526466efe9"}, + {file = "ujson-5.10.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:7ec0ca8c415e81aa4123501fee7f761abf4b7f386aad348501a26940beb1860f"}, + {file = "ujson-5.10.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:ab13a2a9e0b2865a6c6db9271f4b46af1c7476bfd51af1f64585e919b7c07fd4"}, + {file = "ujson-5.10.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:57aaf98b92d72fc70886b5a0e1a1ca52c2320377360341715dd3933a18e827b1"}, + {file = "ujson-5.10.0-cp311-cp311-win32.whl", hash = "sha256:2987713a490ceb27edff77fb184ed09acdc565db700ee852823c3dc3cffe455f"}, + {file = "ujson-5.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:f00ea7e00447918ee0eff2422c4add4c5752b1b60e88fcb3c067d4a21049a720"}, + {file = "ujson-5.10.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:98ba15d8cbc481ce55695beee9f063189dce91a4b08bc1d03e7f0152cd4bbdd5"}, + {file = "ujson-5.10.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a9d2edbf1556e4f56e50fab7d8ff993dbad7f54bac68eacdd27a8f55f433578e"}, + {file = "ujson-5.10.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6627029ae4f52d0e1a2451768c2c37c0c814ffc04f796eb36244cf16b8e57043"}, + {file = "ujson-5.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8ccb77b3e40b151e20519c6ae6d89bfe3f4c14e8e210d910287f778368bb3d1"}, + {file = "ujson-5.10.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3caf9cd64abfeb11a3b661329085c5e167abbe15256b3b68cb5d914ba7396f3"}, + {file = "ujson-5.10.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6e32abdce572e3a8c3d02c886c704a38a1b015a1fb858004e03d20ca7cecbb21"}, + {file = "ujson-5.10.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a65b6af4d903103ee7b6f4f5b85f1bfd0c90ba4eeac6421aae436c9988aa64a2"}, + {file = "ujson-5.10.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:604a046d966457b6cdcacc5aa2ec5314f0e8c42bae52842c1e6fa02ea4bda42e"}, + {file = "ujson-5.10.0-cp312-cp312-win32.whl", hash = 
"sha256:6dea1c8b4fc921bf78a8ff00bbd2bfe166345f5536c510671bccececb187c80e"}, + {file = "ujson-5.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:38665e7d8290188b1e0d57d584eb8110951a9591363316dd41cf8686ab1d0abc"}, + {file = "ujson-5.10.0-cp313-cp313-macosx_10_9_x86_64.whl", hash = "sha256:618efd84dc1acbd6bff8eaa736bb6c074bfa8b8a98f55b61c38d4ca2c1f7f287"}, + {file = "ujson-5.10.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:38d5d36b4aedfe81dfe251f76c0467399d575d1395a1755de391e58985ab1c2e"}, + {file = "ujson-5.10.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:67079b1f9fb29ed9a2914acf4ef6c02844b3153913eb735d4bf287ee1db6e557"}, + {file = "ujson-5.10.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d7d0e0ceeb8fe2468c70ec0c37b439dd554e2aa539a8a56365fd761edb418988"}, + {file = "ujson-5.10.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:59e02cd37bc7c44d587a0ba45347cc815fb7a5fe48de16bf05caa5f7d0d2e816"}, + {file = "ujson-5.10.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:2a890b706b64e0065f02577bf6d8ca3b66c11a5e81fb75d757233a38c07a1f20"}, + {file = "ujson-5.10.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:621e34b4632c740ecb491efc7f1fcb4f74b48ddb55e65221995e74e2d00bbff0"}, + {file = "ujson-5.10.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b9500e61fce0cfc86168b248104e954fead61f9be213087153d272e817ec7b4f"}, + {file = "ujson-5.10.0-cp313-cp313-win32.whl", hash = "sha256:4c4fc16f11ac1612f05b6f5781b384716719547e142cfd67b65d035bd85af165"}, + {file = "ujson-5.10.0-cp313-cp313-win_amd64.whl", hash = "sha256:4573fd1695932d4f619928fd09d5d03d917274381649ade4328091ceca175539"}, + {file = "ujson-5.10.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:a984a3131da7f07563057db1c3020b1350a3e27a8ec46ccbfbf21e5928a43050"}, + {file = "ujson-5.10.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:73814cd1b9db6fc3270e9d8fe3b19f9f89e78ee9d71e8bd6c9a626aeaeaf16bd"}, + {file = "ujson-5.10.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:61e1591ed9376e5eddda202ec229eddc56c612b61ac6ad07f96b91460bb6c2fb"}, + {file = "ujson-5.10.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2c75269f8205b2690db4572a4a36fe47cd1338e4368bc73a7a0e48789e2e35a"}, + {file = "ujson-5.10.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7223f41e5bf1f919cd8d073e35b229295aa8e0f7b5de07ed1c8fddac63a6bc5d"}, + {file = "ujson-5.10.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:d4dc2fd6b3067c0782e7002ac3b38cf48608ee6366ff176bbd02cf969c9c20fe"}, + {file = "ujson-5.10.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:232cc85f8ee3c454c115455195a205074a56ff42608fd6b942aa4c378ac14dd7"}, + {file = "ujson-5.10.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:cc6139531f13148055d691e442e4bc6601f6dba1e6d521b1585d4788ab0bfad4"}, + {file = "ujson-5.10.0-cp38-cp38-win32.whl", hash = "sha256:e7ce306a42b6b93ca47ac4a3b96683ca554f6d35dd8adc5acfcd55096c8dfcb8"}, + {file = "ujson-5.10.0-cp38-cp38-win_amd64.whl", hash = "sha256:e82d4bb2138ab05e18f089a83b6564fee28048771eb63cdecf4b9b549de8a2cc"}, + {file = "ujson-5.10.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:dfef2814c6b3291c3c5f10065f745a1307d86019dbd7ea50e83504950136ed5b"}, + {file = "ujson-5.10.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4734ee0745d5928d0ba3a213647f1c4a74a2a28edc6d27b2d6d5bd9fa4319e27"}, + {file = 
"ujson-5.10.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d47ebb01bd865fdea43da56254a3930a413f0c5590372a1241514abae8aa7c76"}, + {file = "ujson-5.10.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dee5e97c2496874acbf1d3e37b521dd1f307349ed955e62d1d2f05382bc36dd5"}, + {file = "ujson-5.10.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7490655a2272a2d0b072ef16b0b58ee462f4973a8f6bbe64917ce5e0a256f9c0"}, + {file = "ujson-5.10.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:ba17799fcddaddf5c1f75a4ba3fd6441f6a4f1e9173f8a786b42450851bd74f1"}, + {file = "ujson-5.10.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:2aff2985cef314f21d0fecc56027505804bc78802c0121343874741650a4d3d1"}, + {file = "ujson-5.10.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:ad88ac75c432674d05b61184178635d44901eb749786c8eb08c102330e6e8996"}, + {file = "ujson-5.10.0-cp39-cp39-win32.whl", hash = "sha256:2544912a71da4ff8c4f7ab5606f947d7299971bdd25a45e008e467ca638d13c9"}, + {file = "ujson-5.10.0-cp39-cp39-win_amd64.whl", hash = "sha256:3ff201d62b1b177a46f113bb43ad300b424b7847f9c5d38b1b4ad8f75d4a282a"}, + {file = "ujson-5.10.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:5b6fee72fa77dc172a28f21693f64d93166534c263adb3f96c413ccc85ef6e64"}, + {file = "ujson-5.10.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:61d0af13a9af01d9f26d2331ce49bb5ac1fb9c814964018ac8df605b5422dcb3"}, + {file = "ujson-5.10.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ecb24f0bdd899d368b715c9e6664166cf694d1e57be73f17759573a6986dd95a"}, + {file = "ujson-5.10.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fbd8fd427f57a03cff3ad6574b5e299131585d9727c8c366da4624a9069ed746"}, + {file = "ujson-5.10.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:beeaf1c48e32f07d8820c705ff8e645f8afa690cca1544adba4ebfa067efdc88"}, + {file = "ujson-5.10.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:baed37ea46d756aca2955e99525cc02d9181de67f25515c468856c38d52b5f3b"}, + {file = "ujson-5.10.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:7663960f08cd5a2bb152f5ee3992e1af7690a64c0e26d31ba7b3ff5b2ee66337"}, + {file = "ujson-5.10.0-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:d8640fb4072d36b08e95a3a380ba65779d356b2fee8696afeb7794cf0902d0a1"}, + {file = "ujson-5.10.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78778a3aa7aafb11e7ddca4e29f46bc5139131037ad628cc10936764282d6753"}, + {file = "ujson-5.10.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b0111b27f2d5c820e7f2dbad7d48e3338c824e7ac4d2a12da3dc6061cc39c8e6"}, + {file = "ujson-5.10.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:c66962ca7565605b355a9ed478292da628b8f18c0f2793021ca4425abf8b01e5"}, + {file = "ujson-5.10.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ba43cc34cce49cf2d4bc76401a754a81202d8aa926d0e2b79f0ee258cb15d3a4"}, + {file = "ujson-5.10.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:ac56eb983edce27e7f51d05bc8dd820586c6e6be1c5216a6809b0c668bb312b8"}, + {file = "ujson-5.10.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f44bd4b23a0e723bf8b10628288c2c7c335161d6840013d4d5de20e48551773b"}, + {file = 
"ujson-5.10.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7c10f4654e5326ec14a46bcdeb2b685d4ada6911050aa8baaf3501e57024b804"}, + {file = "ujson-5.10.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0de4971a89a762398006e844ae394bd46991f7c385d7a6a3b93ba229e6dac17e"}, + {file = "ujson-5.10.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:e1402f0564a97d2a52310ae10a64d25bcef94f8dd643fcf5d310219d915484f7"}, + {file = "ujson-5.10.0.tar.gz", hash = "sha256:b3cd8f3c5d8c7738257f1018880444f7b7d9b66232c64649f562d7ba86ad4bc1"}, +] + +[[package]] +name = "urllib3" +version = "1.26.20" +description = "HTTP library with thread-safe connection pooling, file post, and more." +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" +groups = ["main"] +files = [ + {file = "urllib3-1.26.20-py2.py3-none-any.whl", hash = "sha256:0ed14ccfbf1c30a9072c7ca157e4319b70d65f623e91e7b32fadb2853431016e"}, + {file = "urllib3-1.26.20.tar.gz", hash = "sha256:40c2dc0c681e47eb8f90e7e27bf6ff7df2e677421fd46756da1161c39ca70d32"}, +] + +[package.extras] +brotli = ["brotli (==1.0.9) ; os_name != \"nt\" and python_version < \"3\" and platform_python_implementation == \"CPython\"", "brotli (>=1.0.9) ; python_version >= \"3\" and platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; (os_name != \"nt\" or python_version >= \"3\") and platform_python_implementation != \"CPython\"", "brotlipy (>=0.6.0) ; os_name == \"nt\" and python_version < \"3\""] +secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress ; python_version == \"2.7\"", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"] +socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] + +[[package]] +name = "uvicorn" +version = "0.32.0" +description = "The lightning-fast ASGI server." 
+optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "uvicorn-0.32.0-py3-none-any.whl", hash = "sha256:60b8f3a5ac027dcd31448f411ced12b5ef452c646f76f02f8cc3f25d8d26fd82"}, + {file = "uvicorn-0.32.0.tar.gz", hash = "sha256:f78b36b143c16f54ccdb8190d0a26b5f1901fe5a3c777e1ab29f26391af8551e"}, +] + +[package.dependencies] +click = ">=7.0" +colorama = {version = ">=0.4", optional = true, markers = "sys_platform == \"win32\" and extra == \"standard\""} +h11 = ">=0.8" +httptools = {version = ">=0.5.0", optional = true, markers = "extra == \"standard\""} +python-dotenv = {version = ">=0.13", optional = true, markers = "extra == \"standard\""} +pyyaml = {version = ">=5.1", optional = true, markers = "extra == \"standard\""} +typing-extensions = {version = ">=4.0", markers = "python_version < \"3.11\""} +uvloop = {version = ">=0.14.0,<0.15.0 || >0.15.0,<0.15.1 || >0.15.1", optional = true, markers = "sys_platform != \"win32\" and sys_platform != \"cygwin\" and platform_python_implementation != \"PyPy\" and extra == \"standard\""} +watchfiles = {version = ">=0.13", optional = true, markers = "extra == \"standard\""} +websockets = {version = ">=10.4", optional = true, markers = "extra == \"standard\""} + +[package.extras] +standard = ["colorama (>=0.4) ; sys_platform == \"win32\"", "httptools (>=0.5.0)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.14.0,!=0.15.0,!=0.15.1) ; sys_platform != \"win32\" and sys_platform != \"cygwin\" and platform_python_implementation != \"PyPy\"", "watchfiles (>=0.13)", "websockets (>=10.4)"] + +[[package]] +name = "uvloop" +version = "0.21.0" +description = "Fast implementation of asyncio event loop on top of libuv" +optional = false +python-versions = ">=3.8.0" +groups = ["main"] +markers = "sys_platform != \"win32\" and sys_platform != \"cygwin\" and platform_python_implementation != \"PyPy\"" +files = [ + {file = "uvloop-0.21.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ec7e6b09a6fdded42403182ab6b832b71f4edaf7f37a9a0e371a01db5f0cb45f"}, + {file = "uvloop-0.21.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:196274f2adb9689a289ad7d65700d37df0c0930fd8e4e743fa4834e850d7719d"}, + {file = "uvloop-0.21.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f38b2e090258d051d68a5b14d1da7203a3c3677321cf32a95a6f4db4dd8b6f26"}, + {file = "uvloop-0.21.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87c43e0f13022b998eb9b973b5e97200c8b90823454d4bc06ab33829e09fb9bb"}, + {file = "uvloop-0.21.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:10d66943def5fcb6e7b37310eb6b5639fd2ccbc38df1177262b0640c3ca68c1f"}, + {file = "uvloop-0.21.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:67dd654b8ca23aed0a8e99010b4c34aca62f4b7fce88f39d452ed7622c94845c"}, + {file = "uvloop-0.21.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:c0f3fa6200b3108919f8bdabb9a7f87f20e7097ea3c543754cabc7d717d95cf8"}, + {file = "uvloop-0.21.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0878c2640cf341b269b7e128b1a5fed890adc4455513ca710d77d5e93aa6d6a0"}, + {file = "uvloop-0.21.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9fb766bb57b7388745d8bcc53a359b116b8a04c83a2288069809d2b3466c37e"}, + {file = "uvloop-0.21.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8a375441696e2eda1c43c44ccb66e04d61ceeffcd76e4929e527b7fa401b90fb"}, + {file = "uvloop-0.21.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = 
"sha256:baa0e6291d91649c6ba4ed4b2f982f9fa165b5bbd50a9e203c416a2797bab3c6"}, + {file = "uvloop-0.21.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:4509360fcc4c3bd2c70d87573ad472de40c13387f5fda8cb58350a1d7475e58d"}, + {file = "uvloop-0.21.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:359ec2c888397b9e592a889c4d72ba3d6befba8b2bb01743f72fffbde663b59c"}, + {file = "uvloop-0.21.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f7089d2dc73179ce5ac255bdf37c236a9f914b264825fdaacaded6990a7fb4c2"}, + {file = "uvloop-0.21.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:baa4dcdbd9ae0a372f2167a207cd98c9f9a1ea1188a8a526431eef2f8116cc8d"}, + {file = "uvloop-0.21.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86975dca1c773a2c9864f4c52c5a55631038e387b47eaf56210f873887b6c8dc"}, + {file = "uvloop-0.21.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:461d9ae6660fbbafedd07559c6a2e57cd553b34b0065b6550685f6653a98c1cb"}, + {file = "uvloop-0.21.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:183aef7c8730e54c9a3ee3227464daed66e37ba13040bb3f350bc2ddc040f22f"}, + {file = "uvloop-0.21.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:bfd55dfcc2a512316e65f16e503e9e450cab148ef11df4e4e679b5e8253a5281"}, + {file = "uvloop-0.21.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:787ae31ad8a2856fc4e7c095341cccc7209bd657d0e71ad0dc2ea83c4a6fa8af"}, + {file = "uvloop-0.21.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5ee4d4ef48036ff6e5cfffb09dd192c7a5027153948d85b8da7ff705065bacc6"}, + {file = "uvloop-0.21.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3df876acd7ec037a3d005b3ab85a7e4110422e4d9c1571d4fc89b0fc41b6816"}, + {file = "uvloop-0.21.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bd53ecc9a0f3d87ab847503c2e1552b690362e005ab54e8a48ba97da3924c0dc"}, + {file = "uvloop-0.21.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a5c39f217ab3c663dc699c04cbd50c13813e31d917642d459fdcec07555cc553"}, + {file = "uvloop-0.21.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:17df489689befc72c39a08359efac29bbee8eee5209650d4b9f34df73d22e414"}, + {file = "uvloop-0.21.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bc09f0ff191e61c2d592a752423c767b4ebb2986daa9ed62908e2b1b9a9ae206"}, + {file = "uvloop-0.21.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f0ce1b49560b1d2d8a2977e3ba4afb2414fb46b86a1b64056bc4ab929efdafbe"}, + {file = "uvloop-0.21.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e678ad6fe52af2c58d2ae3c73dc85524ba8abe637f134bf3564ed07f555c5e79"}, + {file = "uvloop-0.21.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:460def4412e473896ef179a1671b40c039c7012184b627898eea5072ef6f017a"}, + {file = "uvloop-0.21.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:10da8046cc4a8f12c91a1c39d1dd1585c41162a15caaef165c2174db9ef18bdc"}, + {file = "uvloop-0.21.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c097078b8031190c934ed0ebfee8cc5f9ba9642e6eb88322b9958b649750f72b"}, + {file = "uvloop-0.21.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:46923b0b5ee7fc0020bef24afe7836cb068f5050ca04caf6b487c513dc1a20b2"}, + {file = "uvloop-0.21.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:53e420a3afe22cdcf2a0f4846e377d16e718bc70103d7088a4f7623567ba5fb0"}, + {file = "uvloop-0.21.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:88cb67cdbc0e483da00af0b2c3cdad4b7c61ceb1ee0f33fe00e09c81e3a6cb75"}, + {file = "uvloop-0.21.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:221f4f2a1f46032b403bf3be628011caf75428ee3cc204a22addf96f586b19fd"}, + {file = "uvloop-0.21.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:2d1f581393673ce119355d56da84fe1dd9d2bb8b3d13ce792524e1607139feff"}, + {file = "uvloop-0.21.0.tar.gz", hash = "sha256:3bf12b0fda68447806a7ad847bfa591613177275d35b6724b1ee573faa3704e3"}, +] + +[package.extras] +dev = ["Cython (>=3.0,<4.0)", "setuptools (>=60)"] +docs = ["Sphinx (>=4.1.2,<4.2.0)", "sphinx-rtd-theme (>=0.5.2,<0.6.0)", "sphinxcontrib-asyncio (>=0.3.0,<0.4.0)"] +test = ["aiohttp (>=3.10.5)", "flake8 (>=5.0,<6.0)", "mypy (>=0.800)", "psutil", "pyOpenSSL (>=23.0.0,<23.1.0)", "pycodestyle (>=2.9.0,<2.10.0)"] + +[[package]] +name = "watchfiles" +version = "0.24.0" +description = "Simple, modern and high performance file watching and code reload in python." +optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "python_version < \"3.10\"" +files = [ + {file = "watchfiles-0.24.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:083dc77dbdeef09fa44bb0f4d1df571d2e12d8a8f985dccde71ac3ac9ac067a0"}, + {file = "watchfiles-0.24.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e94e98c7cb94cfa6e071d401ea3342767f28eb5a06a58fafdc0d2a4974f4f35c"}, + {file = "watchfiles-0.24.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82ae557a8c037c42a6ef26c494d0631cacca040934b101d001100ed93d43f361"}, + {file = "watchfiles-0.24.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:acbfa31e315a8f14fe33e3542cbcafc55703b8f5dcbb7c1eecd30f141df50db3"}, + {file = "watchfiles-0.24.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b74fdffce9dfcf2dc296dec8743e5b0332d15df19ae464f0e249aa871fc1c571"}, + {file = "watchfiles-0.24.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:449f43f49c8ddca87c6b3980c9284cab6bd1f5c9d9a2b00012adaaccd5e7decd"}, + {file = "watchfiles-0.24.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4abf4ad269856618f82dee296ac66b0cd1d71450fc3c98532d93798e73399b7a"}, + {file = "watchfiles-0.24.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f895d785eb6164678ff4bb5cc60c5996b3ee6df3edb28dcdeba86a13ea0465e"}, + {file = "watchfiles-0.24.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:7ae3e208b31be8ce7f4c2c0034f33406dd24fbce3467f77223d10cd86778471c"}, + {file = "watchfiles-0.24.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:2efec17819b0046dde35d13fb8ac7a3ad877af41ae4640f4109d9154ed30a188"}, + {file = "watchfiles-0.24.0-cp310-none-win32.whl", hash = "sha256:6bdcfa3cd6fdbdd1a068a52820f46a815401cbc2cb187dd006cb076675e7b735"}, + {file = "watchfiles-0.24.0-cp310-none-win_amd64.whl", hash = "sha256:54ca90a9ae6597ae6dc00e7ed0a040ef723f84ec517d3e7ce13e63e4bc82fa04"}, + {file = "watchfiles-0.24.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:bdcd5538e27f188dd3c804b4a8d5f52a7fc7f87e7fd6b374b8e36a4ca03db428"}, + {file = "watchfiles-0.24.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2dadf8a8014fde6addfd3c379e6ed1a981c8f0a48292d662e27cabfe4239c83c"}, + {file = "watchfiles-0.24.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6509ed3f467b79d95fc62a98229f79b1a60d1b93f101e1c61d10c95a46a84f43"}, + {file = 
"watchfiles-0.24.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8360f7314a070c30e4c976b183d1d8d1585a4a50c5cb603f431cebcbb4f66327"}, + {file = "watchfiles-0.24.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:316449aefacf40147a9efaf3bd7c9bdd35aaba9ac5d708bd1eb5763c9a02bef5"}, + {file = "watchfiles-0.24.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:73bde715f940bea845a95247ea3e5eb17769ba1010efdc938ffcb967c634fa61"}, + {file = "watchfiles-0.24.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3770e260b18e7f4e576edca4c0a639f704088602e0bc921c5c2e721e3acb8d15"}, + {file = "watchfiles-0.24.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa0fd7248cf533c259e59dc593a60973a73e881162b1a2f73360547132742823"}, + {file = "watchfiles-0.24.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d7a2e3b7f5703ffbd500dabdefcbc9eafeff4b9444bbdd5d83d79eedf8428fab"}, + {file = "watchfiles-0.24.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d831ee0a50946d24a53821819b2327d5751b0c938b12c0653ea5be7dea9c82ec"}, + {file = "watchfiles-0.24.0-cp311-none-win32.whl", hash = "sha256:49d617df841a63b4445790a254013aea2120357ccacbed00253f9c2b5dc24e2d"}, + {file = "watchfiles-0.24.0-cp311-none-win_amd64.whl", hash = "sha256:d3dcb774e3568477275cc76554b5a565024b8ba3a0322f77c246bc7111c5bb9c"}, + {file = "watchfiles-0.24.0-cp311-none-win_arm64.whl", hash = "sha256:9301c689051a4857d5b10777da23fafb8e8e921bcf3abe6448a058d27fb67633"}, + {file = "watchfiles-0.24.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:7211b463695d1e995ca3feb38b69227e46dbd03947172585ecb0588f19b0d87a"}, + {file = "watchfiles-0.24.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4b8693502d1967b00f2fb82fc1e744df128ba22f530e15b763c8d82baee15370"}, + {file = "watchfiles-0.24.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cdab9555053399318b953a1fe1f586e945bc8d635ce9d05e617fd9fe3a4687d6"}, + {file = "watchfiles-0.24.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:34e19e56d68b0dad5cff62273107cf5d9fbaf9d75c46277aa5d803b3ef8a9e9b"}, + {file = "watchfiles-0.24.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:41face41f036fee09eba33a5b53a73e9a43d5cb2c53dad8e61fa6c9f91b5a51e"}, + {file = "watchfiles-0.24.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5148c2f1ea043db13ce9b0c28456e18ecc8f14f41325aa624314095b6aa2e9ea"}, + {file = "watchfiles-0.24.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7e4bd963a935aaf40b625c2499f3f4f6bbd0c3776f6d3bc7c853d04824ff1c9f"}, + {file = "watchfiles-0.24.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c79d7719d027b7a42817c5d96461a99b6a49979c143839fc37aa5748c322f234"}, + {file = "watchfiles-0.24.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:32aa53a9a63b7f01ed32e316e354e81e9da0e6267435c7243bf8ae0f10b428ef"}, + {file = "watchfiles-0.24.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ce72dba6a20e39a0c628258b5c308779b8697f7676c254a845715e2a1039b968"}, + {file = "watchfiles-0.24.0-cp312-none-win32.whl", hash = "sha256:d9018153cf57fc302a2a34cb7564870b859ed9a732d16b41a9b5cb2ebed2d444"}, + {file = "watchfiles-0.24.0-cp312-none-win_amd64.whl", hash = "sha256:551ec3ee2a3ac9cbcf48a4ec76e42c2ef938a7e905a35b42a1267fa4b1645896"}, + {file = "watchfiles-0.24.0-cp312-none-win_arm64.whl", hash = 
"sha256:b52a65e4ea43c6d149c5f8ddb0bef8d4a1e779b77591a458a893eb416624a418"}, + {file = "watchfiles-0.24.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:3d2e3ab79a1771c530233cadfd277fcc762656d50836c77abb2e5e72b88e3a48"}, + {file = "watchfiles-0.24.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:327763da824817b38ad125dcd97595f942d720d32d879f6c4ddf843e3da3fe90"}, + {file = "watchfiles-0.24.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd82010f8ab451dabe36054a1622870166a67cf3fce894f68895db6f74bbdc94"}, + {file = "watchfiles-0.24.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d64ba08db72e5dfd5c33be1e1e687d5e4fcce09219e8aee893a4862034081d4e"}, + {file = "watchfiles-0.24.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1cf1f6dd7825053f3d98f6d33f6464ebdd9ee95acd74ba2c34e183086900a827"}, + {file = "watchfiles-0.24.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:43e3e37c15a8b6fe00c1bce2473cfa8eb3484bbeecf3aefbf259227e487a03df"}, + {file = "watchfiles-0.24.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:88bcd4d0fe1d8ff43675360a72def210ebad3f3f72cabfeac08d825d2639b4ab"}, + {file = "watchfiles-0.24.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:999928c6434372fde16c8f27143d3e97201160b48a614071261701615a2a156f"}, + {file = "watchfiles-0.24.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:30bbd525c3262fd9f4b1865cb8d88e21161366561cd7c9e1194819e0a33ea86b"}, + {file = "watchfiles-0.24.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:edf71b01dec9f766fb285b73930f95f730bb0943500ba0566ae234b5c1618c18"}, + {file = "watchfiles-0.24.0-cp313-none-win32.whl", hash = "sha256:f4c96283fca3ee09fb044f02156d9570d156698bc3734252175a38f0e8975f07"}, + {file = "watchfiles-0.24.0-cp313-none-win_amd64.whl", hash = "sha256:a974231b4fdd1bb7f62064a0565a6b107d27d21d9acb50c484d2cdba515b9366"}, + {file = "watchfiles-0.24.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:ee82c98bed9d97cd2f53bdb035e619309a098ea53ce525833e26b93f673bc318"}, + {file = "watchfiles-0.24.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:fd92bbaa2ecdb7864b7600dcdb6f2f1db6e0346ed425fbd01085be04c63f0b05"}, + {file = "watchfiles-0.24.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f83df90191d67af5a831da3a33dd7628b02a95450e168785586ed51e6d28943c"}, + {file = "watchfiles-0.24.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fca9433a45f18b7c779d2bae7beeec4f740d28b788b117a48368d95a3233ed83"}, + {file = "watchfiles-0.24.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b995bfa6bf01a9e09b884077a6d37070464b529d8682d7691c2d3b540d357a0c"}, + {file = "watchfiles-0.24.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ed9aba6e01ff6f2e8285e5aa4154e2970068fe0fc0998c4380d0e6278222269b"}, + {file = "watchfiles-0.24.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e5171ef898299c657685306d8e1478a45e9303ddcd8ac5fed5bd52ad4ae0b69b"}, + {file = "watchfiles-0.24.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4933a508d2f78099162da473841c652ad0de892719043d3f07cc83b33dfd9d91"}, + {file = "watchfiles-0.24.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:95cf3b95ea665ab03f5a54765fa41abf0529dbaf372c3b83d91ad2cfa695779b"}, + {file = "watchfiles-0.24.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = 
"sha256:01def80eb62bd5db99a798d5e1f5f940ca0a05986dcfae21d833af7a46f7ee22"}, + {file = "watchfiles-0.24.0-cp38-none-win32.whl", hash = "sha256:4d28cea3c976499475f5b7a2fec6b3a36208656963c1a856d328aeae056fc5c1"}, + {file = "watchfiles-0.24.0-cp38-none-win_amd64.whl", hash = "sha256:21ab23fdc1208086d99ad3f69c231ba265628014d4aed31d4e8746bd59e88cd1"}, + {file = "watchfiles-0.24.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:b665caeeda58625c3946ad7308fbd88a086ee51ccb706307e5b1fa91556ac886"}, + {file = "watchfiles-0.24.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5c51749f3e4e269231510da426ce4a44beb98db2dce9097225c338f815b05d4f"}, + {file = "watchfiles-0.24.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82b2509f08761f29a0fdad35f7e1638b8ab1adfa2666d41b794090361fb8b855"}, + {file = "watchfiles-0.24.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9a60e2bf9dc6afe7f743e7c9b149d1fdd6dbf35153c78fe3a14ae1a9aee3d98b"}, + {file = "watchfiles-0.24.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f7d9b87c4c55e3ea8881dfcbf6d61ea6775fffed1fedffaa60bd047d3c08c430"}, + {file = "watchfiles-0.24.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:78470906a6be5199524641f538bd2c56bb809cd4bf29a566a75051610bc982c3"}, + {file = "watchfiles-0.24.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:07cdef0c84c03375f4e24642ef8d8178e533596b229d32d2bbd69e5128ede02a"}, + {file = "watchfiles-0.24.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d337193bbf3e45171c8025e291530fb7548a93c45253897cd764a6a71c937ed9"}, + {file = "watchfiles-0.24.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ec39698c45b11d9694a1b635a70946a5bad066b593af863460a8e600f0dff1ca"}, + {file = "watchfiles-0.24.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2e28d91ef48eab0afb939fa446d8ebe77e2f7593f5f463fd2bb2b14132f95b6e"}, + {file = "watchfiles-0.24.0-cp39-none-win32.whl", hash = "sha256:7138eff8baa883aeaa074359daabb8b6c1e73ffe69d5accdc907d62e50b1c0da"}, + {file = "watchfiles-0.24.0-cp39-none-win_amd64.whl", hash = "sha256:b3ef2c69c655db63deb96b3c3e587084612f9b1fa983df5e0c3379d41307467f"}, + {file = "watchfiles-0.24.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:632676574429bee8c26be8af52af20e0c718cc7f5f67f3fb658c71928ccd4f7f"}, + {file = "watchfiles-0.24.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:a2a9891723a735d3e2540651184be6fd5b96880c08ffe1a98bae5017e65b544b"}, + {file = "watchfiles-0.24.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a7fa2bc0efef3e209a8199fd111b8969fe9db9c711acc46636686331eda7dd4"}, + {file = "watchfiles-0.24.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:01550ccf1d0aed6ea375ef259706af76ad009ef5b0203a3a4cce0f6024f9b68a"}, + {file = "watchfiles-0.24.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:96619302d4374de5e2345b2b622dc481257a99431277662c30f606f3e22f42be"}, + {file = "watchfiles-0.24.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:85d5f0c7771dcc7a26c7a27145059b6bb0ce06e4e751ed76cdf123d7039b60b5"}, + {file = "watchfiles-0.24.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:951088d12d339690a92cef2ec5d3cfd957692834c72ffd570ea76a6790222777"}, + {file = "watchfiles-0.24.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:49fb58bcaa343fedc6a9e91f90195b20ccb3135447dc9e4e2570c3a39565853e"}, + {file = "watchfiles-0.24.0.tar.gz", hash = "sha256:afb72325b74fa7a428c009c1b8be4b4d7c2afedafb2982827ef2156646df2fe1"}, +] + +[package.dependencies] +anyio = ">=3.0.0" + +[[package]] +name = "watchfiles" +version = "1.1.0" +description = "Simple, modern and high performance file watching and code reload in python." +optional = false +python-versions = ">=3.9" +groups = ["main"] +markers = "python_version >= \"3.10\"" +files = [ + {file = "watchfiles-1.1.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:27f30e14aa1c1e91cb653f03a63445739919aef84c8d2517997a83155e7a2fcc"}, + {file = "watchfiles-1.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3366f56c272232860ab45c77c3ca7b74ee819c8e1f6f35a7125556b198bbc6df"}, + {file = "watchfiles-1.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8412eacef34cae2836d891836a7fff7b754d6bcac61f6c12ba5ca9bc7e427b68"}, + {file = "watchfiles-1.1.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:df670918eb7dd719642e05979fc84704af913d563fd17ed636f7c4783003fdcc"}, + {file = "watchfiles-1.1.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d7642b9bc4827b5518ebdb3b82698ada8c14c7661ddec5fe719f3e56ccd13c97"}, + {file = "watchfiles-1.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:199207b2d3eeaeb80ef4411875a6243d9ad8bc35b07fc42daa6b801cc39cc41c"}, + {file = "watchfiles-1.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a479466da6db5c1e8754caee6c262cd373e6e6c363172d74394f4bff3d84d7b5"}, + {file = "watchfiles-1.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:935f9edd022ec13e447e5723a7d14456c8af254544cefbc533f6dd276c9aa0d9"}, + {file = "watchfiles-1.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:8076a5769d6bdf5f673a19d51da05fc79e2bbf25e9fe755c47595785c06a8c72"}, + {file = "watchfiles-1.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:86b1e28d4c37e89220e924305cd9f82866bb0ace666943a6e4196c5df4d58dcc"}, + {file = "watchfiles-1.1.0-cp310-cp310-win32.whl", hash = "sha256:d1caf40c1c657b27858f9774d5c0e232089bca9cb8ee17ce7478c6e9264d2587"}, + {file = "watchfiles-1.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:a89c75a5b9bc329131115a409d0acc16e8da8dfd5867ba59f1dd66ae7ea8fa82"}, + {file = "watchfiles-1.1.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:c9649dfc57cc1f9835551deb17689e8d44666315f2e82d337b9f07bd76ae3aa2"}, + {file = "watchfiles-1.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:406520216186b99374cdb58bc48e34bb74535adec160c8459894884c983a149c"}, + {file = "watchfiles-1.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb45350fd1dc75cd68d3d72c47f5b513cb0578da716df5fba02fff31c69d5f2d"}, + {file = "watchfiles-1.1.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:11ee4444250fcbeb47459a877e5e80ed994ce8e8d20283857fc128be1715dac7"}, + {file = "watchfiles-1.1.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bda8136e6a80bdea23e5e74e09df0362744d24ffb8cd59c4a95a6ce3d142f79c"}, + {file = "watchfiles-1.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b915daeb2d8c1f5cee4b970f2e2c988ce6514aace3c9296e58dd64dc9aa5d575"}, + {file = "watchfiles-1.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:ed8fc66786de8d0376f9f913c09e963c66e90ced9aa11997f93bdb30f7c872a8"}, + {file = "watchfiles-1.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe4371595edf78c41ef8ac8df20df3943e13defd0efcb732b2e393b5a8a7a71f"}, + {file = "watchfiles-1.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b7c5f6fe273291f4d414d55b2c80d33c457b8a42677ad14b4b47ff025d0893e4"}, + {file = "watchfiles-1.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:7738027989881e70e3723c75921f1efa45225084228788fc59ea8c6d732eb30d"}, + {file = "watchfiles-1.1.0-cp311-cp311-win32.whl", hash = "sha256:622d6b2c06be19f6e89b1d951485a232e3b59618def88dbeda575ed8f0d8dbf2"}, + {file = "watchfiles-1.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:48aa25e5992b61debc908a61ab4d3f216b64f44fdaa71eb082d8b2de846b7d12"}, + {file = "watchfiles-1.1.0-cp311-cp311-win_arm64.whl", hash = "sha256:00645eb79a3faa70d9cb15c8d4187bb72970b2470e938670240c7998dad9f13a"}, + {file = "watchfiles-1.1.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9dc001c3e10de4725c749d4c2f2bdc6ae24de5a88a339c4bce32300a31ede179"}, + {file = "watchfiles-1.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d9ba68ec283153dead62cbe81872d28e053745f12335d037de9cbd14bd1877f5"}, + {file = "watchfiles-1.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:130fc497b8ee68dce163e4254d9b0356411d1490e868bd8790028bc46c5cc297"}, + {file = "watchfiles-1.1.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:50a51a90610d0845a5931a780d8e51d7bd7f309ebc25132ba975aca016b576a0"}, + {file = "watchfiles-1.1.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dc44678a72ac0910bac46fa6a0de6af9ba1355669b3dfaf1ce5f05ca7a74364e"}, + {file = "watchfiles-1.1.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a543492513a93b001975ae283a51f4b67973662a375a403ae82f420d2c7205ee"}, + {file = "watchfiles-1.1.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ac164e20d17cc285f2b94dc31c384bc3aa3dd5e7490473b3db043dd70fbccfd"}, + {file = "watchfiles-1.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f7590d5a455321e53857892ab8879dce62d1f4b04748769f5adf2e707afb9d4f"}, + {file = "watchfiles-1.1.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:37d3d3f7defb13f62ece99e9be912afe9dd8a0077b7c45ee5a57c74811d581a4"}, + {file = "watchfiles-1.1.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:7080c4bb3efd70a07b1cc2df99a7aa51d98685be56be6038c3169199d0a1c69f"}, + {file = "watchfiles-1.1.0-cp312-cp312-win32.whl", hash = "sha256:cbcf8630ef4afb05dc30107bfa17f16c0896bb30ee48fc24bf64c1f970f3b1fd"}, + {file = "watchfiles-1.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:cbd949bdd87567b0ad183d7676feb98136cde5bb9025403794a4c0db28ed3a47"}, + {file = "watchfiles-1.1.0-cp312-cp312-win_arm64.whl", hash = "sha256:0a7d40b77f07be87c6faa93d0951a0fcd8cbca1ddff60a1b65d741bac6f3a9f6"}, + {file = "watchfiles-1.1.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:5007f860c7f1f8df471e4e04aaa8c43673429047d63205d1630880f7637bca30"}, + {file = "watchfiles-1.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:20ecc8abbd957046f1fe9562757903f5eaf57c3bce70929fda6c7711bb58074a"}, + {file = "watchfiles-1.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f2f0498b7d2a3c072766dba3274fe22a183dbea1f99d188f1c6c72209a1063dc"}, + {file = 
"watchfiles-1.1.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:239736577e848678e13b201bba14e89718f5c2133dfd6b1f7846fa1b58a8532b"}, + {file = "watchfiles-1.1.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eff4b8d89f444f7e49136dc695599a591ff769300734446c0a86cba2eb2f9895"}, + {file = "watchfiles-1.1.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:12b0a02a91762c08f7264e2e79542f76870c3040bbc847fb67410ab81474932a"}, + {file = "watchfiles-1.1.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:29e7bc2eee15cbb339c68445959108803dc14ee0c7b4eea556400131a8de462b"}, + {file = "watchfiles-1.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d9481174d3ed982e269c090f780122fb59cee6c3796f74efe74e70f7780ed94c"}, + {file = "watchfiles-1.1.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:80f811146831c8c86ab17b640801c25dc0a88c630e855e2bef3568f30434d52b"}, + {file = "watchfiles-1.1.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:60022527e71d1d1fda67a33150ee42869042bce3d0fcc9cc49be009a9cded3fb"}, + {file = "watchfiles-1.1.0-cp313-cp313-win32.whl", hash = "sha256:32d6d4e583593cb8576e129879ea0991660b935177c0f93c6681359b3654bfa9"}, + {file = "watchfiles-1.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:f21af781a4a6fbad54f03c598ab620e3a77032c5878f3d780448421a6e1818c7"}, + {file = "watchfiles-1.1.0-cp313-cp313-win_arm64.whl", hash = "sha256:5366164391873ed76bfdf618818c82084c9db7fac82b64a20c44d335eec9ced5"}, + {file = "watchfiles-1.1.0-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:17ab167cca6339c2b830b744eaf10803d2a5b6683be4d79d8475d88b4a8a4be1"}, + {file = "watchfiles-1.1.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:328dbc9bff7205c215a7807da7c18dce37da7da718e798356212d22696404339"}, + {file = "watchfiles-1.1.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f7208ab6e009c627b7557ce55c465c98967e8caa8b11833531fdf95799372633"}, + {file = "watchfiles-1.1.0-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a8f6f72974a19efead54195bc9bed4d850fc047bb7aa971268fd9a8387c89011"}, + {file = "watchfiles-1.1.0-cp313-cp313t-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d181ef50923c29cf0450c3cd47e2f0557b62218c50b2ab8ce2ecaa02bd97e670"}, + {file = "watchfiles-1.1.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:adb4167043d3a78280d5d05ce0ba22055c266cf8655ce942f2fb881262ff3cdf"}, + {file = "watchfiles-1.1.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8c5701dc474b041e2934a26d31d39f90fac8a3dee2322b39f7729867f932b1d4"}, + {file = "watchfiles-1.1.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b067915e3c3936966a8607f6fe5487df0c9c4afb85226613b520890049deea20"}, + {file = "watchfiles-1.1.0-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:9c733cda03b6d636b4219625a4acb5c6ffb10803338e437fb614fef9516825ef"}, + {file = "watchfiles-1.1.0-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:cc08ef8b90d78bfac66f0def80240b0197008e4852c9f285907377b2947ffdcb"}, + {file = "watchfiles-1.1.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:9974d2f7dc561cce3bb88dfa8eb309dab64c729de85fba32e98d75cf24b66297"}, + {file = "watchfiles-1.1.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c68e9f1fcb4d43798ad8814c4c1b61547b014b667216cb754e606bfade587018"}, + {file = 
"watchfiles-1.1.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:95ab1594377effac17110e1352989bdd7bdfca9ff0e5eeccd8c69c5389b826d0"}, + {file = "watchfiles-1.1.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fba9b62da882c1be1280a7584ec4515d0a6006a94d6e5819730ec2eab60ffe12"}, + {file = "watchfiles-1.1.0-cp314-cp314-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3434e401f3ce0ed6b42569128b3d1e3af773d7ec18751b918b89cd49c14eaafb"}, + {file = "watchfiles-1.1.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fa257a4d0d21fcbca5b5fcba9dca5a78011cb93c0323fb8855c6d2dfbc76eb77"}, + {file = "watchfiles-1.1.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7fd1b3879a578a8ec2076c7961076df540b9af317123f84569f5a9ddee64ce92"}, + {file = "watchfiles-1.1.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:62cc7a30eeb0e20ecc5f4bd113cd69dcdb745a07c68c0370cea919f373f65d9e"}, + {file = "watchfiles-1.1.0-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:891c69e027748b4a73847335d208e374ce54ca3c335907d381fde4e41661b13b"}, + {file = "watchfiles-1.1.0-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:12fe8eaffaf0faa7906895b4f8bb88264035b3f0243275e0bf24af0436b27259"}, + {file = "watchfiles-1.1.0-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:bfe3c517c283e484843cb2e357dd57ba009cff351edf45fb455b5fbd1f45b15f"}, + {file = "watchfiles-1.1.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:a9ccbf1f129480ed3044f540c0fdbc4ee556f7175e5ab40fe077ff6baf286d4e"}, + {file = "watchfiles-1.1.0-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ba0e3255b0396cac3cc7bbace76404dd72b5438bf0d8e7cefa2f79a7f3649caa"}, + {file = "watchfiles-1.1.0-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4281cd9fce9fc0a9dbf0fc1217f39bf9cf2b4d315d9626ef1d4e87b84699e7e8"}, + {file = "watchfiles-1.1.0-cp314-cp314t-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6d2404af8db1329f9a3c9b79ff63e0ae7131986446901582067d9304ae8aaf7f"}, + {file = "watchfiles-1.1.0-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e78b6ed8165996013165eeabd875c5dfc19d41b54f94b40e9fff0eb3193e5e8e"}, + {file = "watchfiles-1.1.0-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:249590eb75ccc117f488e2fabd1bfa33c580e24b96f00658ad88e38844a040bb"}, + {file = "watchfiles-1.1.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d05686b5487cfa2e2c28ff1aa370ea3e6c5accfe6435944ddea1e10d93872147"}, + {file = "watchfiles-1.1.0-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:d0e10e6f8f6dc5762adee7dece33b722282e1f59aa6a55da5d493a97282fedd8"}, + {file = "watchfiles-1.1.0-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:af06c863f152005c7592df1d6a7009c836a247c9d8adb78fef8575a5a98699db"}, + {file = "watchfiles-1.1.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:865c8e95713744cf5ae261f3067861e9da5f1370ba91fc536431e29b418676fa"}, + {file = "watchfiles-1.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:42f92befc848bb7a19658f21f3e7bae80d7d005d13891c62c2cd4d4d0abb3433"}, + {file = "watchfiles-1.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa0cc8365ab29487eb4f9979fd41b22549853389e22d5de3f134a6796e1b05a4"}, + {file = "watchfiles-1.1.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:90ebb429e933645f3da534c89b29b665e285048973b4d2b6946526888c3eb2c7"}, + {file = "watchfiles-1.1.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c588c45da9b08ab3da81d08d7987dae6d2a3badd63acdb3e206a42dbfa7cb76f"}, + {file = "watchfiles-1.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7c55b0f9f68590115c25272b06e63f0824f03d4fc7d6deed43d8ad5660cabdbf"}, + {file = "watchfiles-1.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd17a1e489f02ce9117b0de3c0b1fab1c3e2eedc82311b299ee6b6faf6c23a29"}, + {file = "watchfiles-1.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da71945c9ace018d8634822f16cbc2a78323ef6c876b1d34bbf5d5222fd6a72e"}, + {file = "watchfiles-1.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:51556d5004887045dba3acdd1fdf61dddea2be0a7e18048b5e853dcd37149b86"}, + {file = "watchfiles-1.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04e4ed5d1cd3eae68c89bcc1a485a109f39f2fd8de05f705e98af6b5f1861f1f"}, + {file = "watchfiles-1.1.0-cp39-cp39-win32.whl", hash = "sha256:c600e85f2ffd9f1035222b1a312aff85fd11ea39baff1d705b9b047aad2ce267"}, + {file = "watchfiles-1.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:3aba215958d88182e8d2acba0fdaf687745180974946609119953c0e112397dc"}, + {file = "watchfiles-1.1.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3a6fd40bbb50d24976eb275ccb55cd1951dfb63dbc27cae3066a6ca5f4beabd5"}, + {file = "watchfiles-1.1.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:9f811079d2f9795b5d48b55a37aa7773680a5659afe34b54cc1d86590a51507d"}, + {file = "watchfiles-1.1.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a2726d7bfd9f76158c84c10a409b77a320426540df8c35be172444394b17f7ea"}, + {file = "watchfiles-1.1.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:df32d59cb9780f66d165a9a7a26f19df2c7d24e3bd58713108b41d0ff4f929c6"}, + {file = "watchfiles-1.1.0-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:0ece16b563b17ab26eaa2d52230c9a7ae46cf01759621f4fbbca280e438267b3"}, + {file = "watchfiles-1.1.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:51b81e55d40c4b4aa8658427a3ee7ea847c591ae9e8b81ef94a90b668999353c"}, + {file = "watchfiles-1.1.0-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f2bcdc54ea267fe72bfc7d83c041e4eb58d7d8dc6f578dfddb52f037ce62f432"}, + {file = "watchfiles-1.1.0-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:923fec6e5461c42bd7e3fd5ec37492c6f3468be0499bc0707b4bbbc16ac21792"}, + {file = "watchfiles-1.1.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:7b3443f4ec3ba5aa00b0e9fa90cf31d98321cbff8b925a7c7b84161619870bc9"}, + {file = "watchfiles-1.1.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:7049e52167fc75fc3cc418fc13d39a8e520cbb60ca08b47f6cedb85e181d2f2a"}, + {file = "watchfiles-1.1.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54062ef956807ba806559b3c3d52105ae1827a0d4ab47b621b31132b6b7e2866"}, + {file = "watchfiles-1.1.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a7bd57a1bb02f9d5c398c0c1675384e7ab1dd39da0ca50b7f09af45fa435277"}, + {file = "watchfiles-1.1.0.tar.gz", hash = "sha256:693ed7ec72cbfcee399e92c895362b6e66d63dac6b91e2c11ae03d10d503e575"}, +] + +[package.dependencies] +anyio = ">=3.0.0" + +[[package]] +name = "websocket-client" +version = "1.8.0" 
+description = "WebSocket client for Python with low level API options" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "websocket_client-1.8.0-py3-none-any.whl", hash = "sha256:17b44cc997f5c498e809b22cdf2d9c7a9e71c02c8cc2b6c56e7c2d1239bfa526"}, + {file = "websocket_client-1.8.0.tar.gz", hash = "sha256:3239df9f44da632f96012472805d40a23281a991027ce11d2f45a6f24ac4c3da"}, +] + +[package.extras] +docs = ["Sphinx (>=6.0)", "myst-parser (>=2.0.0)", "sphinx-rtd-theme (>=1.1.0)"] +optional = ["python-socks", "wsaccel"] +test = ["websockets"] + +[[package]] +name = "websockets" +version = "13.1" +description = "An implementation of the WebSocket Protocol (RFC 6455 & 7692)" +optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "python_version < \"3.10\"" +files = [ + {file = "websockets-13.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f48c749857f8fb598fb890a75f540e3221d0976ed0bf879cf3c7eef34151acee"}, + {file = "websockets-13.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c7e72ce6bda6fb9409cc1e8164dd41d7c91466fb599eb047cfda72fe758a34a7"}, + {file = "websockets-13.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f779498eeec470295a2b1a5d97aa1bc9814ecd25e1eb637bd9d1c73a327387f6"}, + {file = "websockets-13.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4676df3fe46956fbb0437d8800cd5f2b6d41143b6e7e842e60554398432cf29b"}, + {file = "websockets-13.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a7affedeb43a70351bb811dadf49493c9cfd1ed94c9c70095fd177e9cc1541fa"}, + {file = "websockets-13.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1971e62d2caa443e57588e1d82d15f663b29ff9dfe7446d9964a4b6f12c1e700"}, + {file = "websockets-13.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:5f2e75431f8dc4a47f31565a6e1355fb4f2ecaa99d6b89737527ea917066e26c"}, + {file = "websockets-13.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:58cf7e75dbf7e566088b07e36ea2e3e2bd5676e22216e4cad108d4df4a7402a0"}, + {file = "websockets-13.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:c90d6dec6be2c7d03378a574de87af9b1efea77d0c52a8301dd831ece938452f"}, + {file = "websockets-13.1-cp310-cp310-win32.whl", hash = "sha256:730f42125ccb14602f455155084f978bd9e8e57e89b569b4d7f0f0c17a448ffe"}, + {file = "websockets-13.1-cp310-cp310-win_amd64.whl", hash = "sha256:5993260f483d05a9737073be197371940c01b257cc45ae3f1d5d7adb371b266a"}, + {file = "websockets-13.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:61fc0dfcda609cda0fc9fe7977694c0c59cf9d749fbb17f4e9483929e3c48a19"}, + {file = "websockets-13.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ceec59f59d092c5007e815def4ebb80c2de330e9588e101cf8bd94c143ec78a5"}, + {file = "websockets-13.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c1dca61c6db1166c48b95198c0b7d9c990b30c756fc2923cc66f68d17dc558fd"}, + {file = "websockets-13.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:308e20f22c2c77f3f39caca508e765f8725020b84aa963474e18c59accbf4c02"}, + {file = "websockets-13.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62d516c325e6540e8a57b94abefc3459d7dab8ce52ac75c96cad5549e187e3a7"}, + {file = "websockets-13.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:87c6e35319b46b99e168eb98472d6c7d8634ee37750d7693656dc766395df096"}, + {file = "websockets-13.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:5f9fee94ebafbc3117c30be1844ed01a3b177bb6e39088bc6b2fa1dc15572084"}, + {file = "websockets-13.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:7c1e90228c2f5cdde263253fa5db63e6653f1c00e7ec64108065a0b9713fa1b3"}, + {file = "websockets-13.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6548f29b0e401eea2b967b2fdc1c7c7b5ebb3eeb470ed23a54cd45ef078a0db9"}, + {file = "websockets-13.1-cp311-cp311-win32.whl", hash = "sha256:c11d4d16e133f6df8916cc5b7e3e96ee4c44c936717d684a94f48f82edb7c92f"}, + {file = "websockets-13.1-cp311-cp311-win_amd64.whl", hash = "sha256:d04f13a1d75cb2b8382bdc16ae6fa58c97337253826dfe136195b7f89f661557"}, + {file = "websockets-13.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:9d75baf00138f80b48f1eac72ad1535aac0b6461265a0bcad391fc5aba875cfc"}, + {file = "websockets-13.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:9b6f347deb3dcfbfde1c20baa21c2ac0751afaa73e64e5b693bb2b848efeaa49"}, + {file = "websockets-13.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:de58647e3f9c42f13f90ac7e5f58900c80a39019848c5547bc691693098ae1bd"}, + {file = "websockets-13.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1b54689e38d1279a51d11e3467dd2f3a50f5f2e879012ce8f2d6943f00e83f0"}, + {file = "websockets-13.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cf1781ef73c073e6b0f90af841aaf98501f975d306bbf6221683dd594ccc52b6"}, + {file = "websockets-13.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d23b88b9388ed85c6faf0e74d8dec4f4d3baf3ecf20a65a47b836d56260d4b9"}, + {file = "websockets-13.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3c78383585f47ccb0fcf186dcb8a43f5438bd7d8f47d69e0b56f71bf431a0a68"}, + {file = "websockets-13.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:d6d300f8ec35c24025ceb9b9019ae9040c1ab2f01cddc2bcc0b518af31c75c14"}, + {file = "websockets-13.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a9dcaf8b0cc72a392760bb8755922c03e17a5a54e08cca58e8b74f6902b433cf"}, + {file = "websockets-13.1-cp312-cp312-win32.whl", hash = "sha256:2f85cf4f2a1ba8f602298a853cec8526c2ca42a9a4b947ec236eaedb8f2dc80c"}, + {file = "websockets-13.1-cp312-cp312-win_amd64.whl", hash = "sha256:38377f8b0cdeee97c552d20cf1865695fcd56aba155ad1b4ca8779a5b6ef4ac3"}, + {file = "websockets-13.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a9ab1e71d3d2e54a0aa646ab6d4eebfaa5f416fe78dfe4da2839525dc5d765c6"}, + {file = "websockets-13.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b9d7439d7fab4dce00570bb906875734df13d9faa4b48e261c440a5fec6d9708"}, + {file = "websockets-13.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:327b74e915cf13c5931334c61e1a41040e365d380f812513a255aa804b183418"}, + {file = "websockets-13.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:325b1ccdbf5e5725fdcb1b0e9ad4d2545056479d0eee392c291c1bf76206435a"}, + {file = "websockets-13.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:346bee67a65f189e0e33f520f253d5147ab76ae42493804319b5716e46dddf0f"}, + {file = "websockets-13.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:91a0fa841646320ec0d3accdff5b757b06e2e5c86ba32af2e0815c96c7a603c5"}, + {file = "websockets-13.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:18503d2c5f3943e93819238bf20df71982d193f73dcecd26c94514f417f6b135"}, + {file = "websockets-13.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:a9cd1af7e18e5221d2878378fbc287a14cd527fdd5939ed56a18df8a31136bb2"}, + {file = "websockets-13.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:70c5be9f416aa72aab7a2a76c90ae0a4fe2755c1816c153c1a2bcc3333ce4ce6"}, + {file = "websockets-13.1-cp313-cp313-win32.whl", hash = "sha256:624459daabeb310d3815b276c1adef475b3e6804abaf2d9d2c061c319f7f187d"}, + {file = "websockets-13.1-cp313-cp313-win_amd64.whl", hash = "sha256:c518e84bb59c2baae725accd355c8dc517b4a3ed8db88b4bc93c78dae2974bf2"}, + {file = "websockets-13.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:c7934fd0e920e70468e676fe7f1b7261c1efa0d6c037c6722278ca0228ad9d0d"}, + {file = "websockets-13.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:149e622dc48c10ccc3d2760e5f36753db9cacf3ad7bc7bbbfd7d9c819e286f23"}, + {file = "websockets-13.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a569eb1b05d72f9bce2ebd28a1ce2054311b66677fcd46cf36204ad23acead8c"}, + {file = "websockets-13.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:95df24ca1e1bd93bbca51d94dd049a984609687cb2fb08a7f2c56ac84e9816ea"}, + {file = "websockets-13.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d8dbb1bf0c0a4ae8b40bdc9be7f644e2f3fb4e8a9aca7145bfa510d4a374eeb7"}, + {file = "websockets-13.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:035233b7531fb92a76beefcbf479504db8c72eb3bff41da55aecce3a0f729e54"}, + {file = "websockets-13.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:e4450fc83a3df53dec45922b576e91e94f5578d06436871dce3a6be38e40f5db"}, + {file = "websockets-13.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:463e1c6ec853202dd3657f156123d6b4dad0c546ea2e2e38be2b3f7c5b8e7295"}, + {file = "websockets-13.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:6d6855bbe70119872c05107e38fbc7f96b1d8cb047d95c2c50869a46c65a8e96"}, + {file = "websockets-13.1-cp38-cp38-win32.whl", hash = "sha256:204e5107f43095012b00f1451374693267adbb832d29966a01ecc4ce1db26faf"}, + {file = "websockets-13.1-cp38-cp38-win_amd64.whl", hash = "sha256:485307243237328c022bc908b90e4457d0daa8b5cf4b3723fd3c4a8012fce4c6"}, + {file = "websockets-13.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:9b37c184f8b976f0c0a231a5f3d6efe10807d41ccbe4488df8c74174805eea7d"}, + {file = "websockets-13.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:163e7277e1a0bd9fb3c8842a71661ad19c6aa7bb3d6678dc7f89b17fbcc4aeb7"}, + {file = "websockets-13.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4b889dbd1342820cc210ba44307cf75ae5f2f96226c0038094455a96e64fb07a"}, + {file = "websockets-13.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:586a356928692c1fed0eca68b4d1c2cbbd1ca2acf2ac7e7ebd3b9052582deefa"}, + {file = "websockets-13.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7bd6abf1e070a6b72bfeb71049d6ad286852e285f146682bf30d0296f5fbadfa"}, + {file = "websockets-13.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d2aad13a200e5934f5a6767492fb07151e1de1d6079c003ab31e1823733ae79"}, + {file = 
"websockets-13.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:df01aea34b6e9e33572c35cd16bae5a47785e7d5c8cb2b54b2acdb9678315a17"}, + {file = "websockets-13.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:e54affdeb21026329fb0744ad187cf812f7d3c2aa702a5edb562b325191fcab6"}, + {file = "websockets-13.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:9ef8aa8bdbac47f4968a5d66462a2a0935d044bf35c0e5a8af152d58516dbeb5"}, + {file = "websockets-13.1-cp39-cp39-win32.whl", hash = "sha256:deeb929efe52bed518f6eb2ddc00cc496366a14c726005726ad62c2dd9017a3c"}, + {file = "websockets-13.1-cp39-cp39-win_amd64.whl", hash = "sha256:7c65ffa900e7cc958cd088b9a9157a8141c991f8c53d11087e6fb7277a03f81d"}, + {file = "websockets-13.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:5dd6da9bec02735931fccec99d97c29f47cc61f644264eb995ad6c0c27667238"}, + {file = "websockets-13.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:2510c09d8e8df777177ee3d40cd35450dc169a81e747455cc4197e63f7e7bfe5"}, + {file = "websockets-13.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1c3cf67185543730888b20682fb186fc8d0fa6f07ccc3ef4390831ab4b388d9"}, + {file = "websockets-13.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bcc03c8b72267e97b49149e4863d57c2d77f13fae12066622dc78fe322490fe6"}, + {file = "websockets-13.1-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:004280a140f220c812e65f36944a9ca92d766b6cc4560be652a0a3883a79ed8a"}, + {file = "websockets-13.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:e2620453c075abeb0daa949a292e19f56de518988e079c36478bacf9546ced23"}, + {file = "websockets-13.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:9156c45750b37337f7b0b00e6248991a047be4aa44554c9886fe6bdd605aab3b"}, + {file = "websockets-13.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:80c421e07973a89fbdd93e6f2003c17d20b69010458d3a8e37fb47874bd67d51"}, + {file = "websockets-13.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82d0ba76371769d6a4e56f7e83bb8e81846d17a6190971e38b5de108bde9b0d7"}, + {file = "websockets-13.1-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e9875a0143f07d74dc5e1ded1c4581f0d9f7ab86c78994e2ed9e95050073c94d"}, + {file = "websockets-13.1-pp38-pypy38_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a11e38ad8922c7961447f35c7b17bffa15de4d17c70abd07bfbe12d6faa3e027"}, + {file = "websockets-13.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:4059f790b6ae8768471cddb65d3c4fe4792b0ab48e154c9f0a04cefaabcd5978"}, + {file = "websockets-13.1-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:25c35bf84bf7c7369d247f0b8cfa157f989862c49104c5cf85cb5436a641d93e"}, + {file = "websockets-13.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:83f91d8a9bb404b8c2c41a707ac7f7f75b9442a0a876df295de27251a856ad09"}, + {file = "websockets-13.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7a43cfdcddd07f4ca2b1afb459824dd3c6d53a51410636a2c7fc97b9a8cf4842"}, + {file = "websockets-13.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:48a2ef1381632a2f0cb4efeff34efa97901c9fbc118e01951ad7cfc10601a9bb"}, + {file = 
"websockets-13.1-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:459bf774c754c35dbb487360b12c5727adab887f1622b8aed5755880a21c4a20"}, + {file = "websockets-13.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:95858ca14a9f6fa8413d29e0a585b31b278388aa775b8a81fa24830123874678"}, + {file = "websockets-13.1-py3-none-any.whl", hash = "sha256:a9a396a6ad26130cdae92ae10c36af09d9bfe6cafe69670fd3b6da9b07b4044f"}, + {file = "websockets-13.1.tar.gz", hash = "sha256:a3b3366087c1bc0a2795111edcadddb8b3b59509d5db5d7ea3fdd69f954a8878"}, +] + +[[package]] +name = "websockets" +version = "15.0.1" +description = "An implementation of the WebSocket Protocol (RFC 6455 & 7692)" +optional = false +python-versions = ">=3.9" +groups = ["main"] +markers = "python_version >= \"3.10\"" +files = [ + {file = "websockets-15.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d63efaa0cd96cf0c5fe4d581521d9fa87744540d4bc999ae6e08595a1014b45b"}, + {file = "websockets-15.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ac60e3b188ec7574cb761b08d50fcedf9d77f1530352db4eef1707fe9dee7205"}, + {file = "websockets-15.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5756779642579d902eed757b21b0164cd6fe338506a8083eb58af5c372e39d9a"}, + {file = "websockets-15.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0fdfe3e2a29e4db3659dbd5bbf04560cea53dd9610273917799f1cde46aa725e"}, + {file = "websockets-15.0.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c2529b320eb9e35af0fa3016c187dffb84a3ecc572bcee7c3ce302bfeba52bf"}, + {file = "websockets-15.0.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac1e5c9054fe23226fb11e05a6e630837f074174c4c2f0fe442996112a6de4fb"}, + {file = "websockets-15.0.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:5df592cd503496351d6dc14f7cdad49f268d8e618f80dce0cd5a36b93c3fc08d"}, + {file = "websockets-15.0.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0a34631031a8f05657e8e90903e656959234f3a04552259458aac0b0f9ae6fd9"}, + {file = "websockets-15.0.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3d00075aa65772e7ce9e990cab3ff1de702aa09be3940d1dc88d5abf1ab8a09c"}, + {file = "websockets-15.0.1-cp310-cp310-win32.whl", hash = "sha256:1234d4ef35db82f5446dca8e35a7da7964d02c127b095e172e54397fb6a6c256"}, + {file = "websockets-15.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:39c1fec2c11dc8d89bba6b2bf1556af381611a173ac2b511cf7231622058af41"}, + {file = "websockets-15.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:823c248b690b2fd9303ba00c4f66cd5e2d8c3ba4aa968b2779be9532a4dad431"}, + {file = "websockets-15.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678999709e68425ae2593acf2e3ebcbcf2e69885a5ee78f9eb80e6e371f1bf57"}, + {file = "websockets-15.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d50fd1ee42388dcfb2b3676132c78116490976f1300da28eb629272d5d93e905"}, + {file = "websockets-15.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d99e5546bf73dbad5bf3547174cd6cb8ba7273062a23808ffea025ecb1cf8562"}, + {file = "websockets-15.0.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:66dd88c918e3287efc22409d426c8f729688d89a0c587c88971a0faa2c2f3792"}, + {file = 
"websockets-15.0.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8dd8327c795b3e3f219760fa603dcae1dcc148172290a8ab15158cf85a953413"}, + {file = "websockets-15.0.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8fdc51055e6ff4adeb88d58a11042ec9a5eae317a0a53d12c062c8a8865909e8"}, + {file = "websockets-15.0.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:693f0192126df6c2327cce3baa7c06f2a117575e32ab2308f7f8216c29d9e2e3"}, + {file = "websockets-15.0.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:54479983bd5fb469c38f2f5c7e3a24f9a4e70594cd68cd1fa6b9340dadaff7cf"}, + {file = "websockets-15.0.1-cp311-cp311-win32.whl", hash = "sha256:16b6c1b3e57799b9d38427dda63edcbe4926352c47cf88588c0be4ace18dac85"}, + {file = "websockets-15.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:27ccee0071a0e75d22cb35849b1db43f2ecd3e161041ac1ee9d2352ddf72f065"}, + {file = "websockets-15.0.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:3e90baa811a5d73f3ca0bcbf32064d663ed81318ab225ee4f427ad4e26e5aff3"}, + {file = "websockets-15.0.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:592f1a9fe869c778694f0aa806ba0374e97648ab57936f092fd9d87f8bc03665"}, + {file = "websockets-15.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0701bc3cfcb9164d04a14b149fd74be7347a530ad3bbf15ab2c678a2cd3dd9a2"}, + {file = "websockets-15.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e8b56bdcdb4505c8078cb6c7157d9811a85790f2f2b3632c7d1462ab5783d215"}, + {file = "websockets-15.0.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0af68c55afbd5f07986df82831c7bff04846928ea8d1fd7f30052638788bc9b5"}, + {file = "websockets-15.0.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64dee438fed052b52e4f98f76c5790513235efaa1ef7f3f2192c392cd7c91b65"}, + {file = "websockets-15.0.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d5f6b181bb38171a8ad1d6aa58a67a6aa9d4b38d0f8c5f496b9e42561dfc62fe"}, + {file = "websockets-15.0.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:5d54b09eba2bada6011aea5375542a157637b91029687eb4fdb2dab11059c1b4"}, + {file = "websockets-15.0.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3be571a8b5afed347da347bfcf27ba12b069d9d7f42cb8c7028b5e98bbb12597"}, + {file = "websockets-15.0.1-cp312-cp312-win32.whl", hash = "sha256:c338ffa0520bdb12fbc527265235639fb76e7bc7faafbb93f6ba80d9c06578a9"}, + {file = "websockets-15.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:fcd5cf9e305d7b8338754470cf69cf81f420459dbae8a3b40cee57417f4614a7"}, + {file = "websockets-15.0.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ee443ef070bb3b6ed74514f5efaa37a252af57c90eb33b956d35c8e9c10a1931"}, + {file = "websockets-15.0.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5a939de6b7b4e18ca683218320fc67ea886038265fd1ed30173f5ce3f8e85675"}, + {file = "websockets-15.0.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:746ee8dba912cd6fc889a8147168991d50ed70447bf18bcda7039f7d2e3d9151"}, + {file = "websockets-15.0.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:595b6c3969023ecf9041b2936ac3827e4623bfa3ccf007575f04c5a6aa318c22"}, + {file = "websockets-15.0.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3c714d2fc58b5ca3e285461a4cc0c9a66bd0e24c5da9911e30158286c9b5be7f"}, + {file = 
"websockets-15.0.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f3c1e2ab208db911594ae5b4f79addeb3501604a165019dd221c0bdcabe4db8"}, + {file = "websockets-15.0.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:229cf1d3ca6c1804400b0a9790dc66528e08a6a1feec0d5040e8b9eb14422375"}, + {file = "websockets-15.0.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:756c56e867a90fb00177d530dca4b097dd753cde348448a1012ed6c5131f8b7d"}, + {file = "websockets-15.0.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:558d023b3df0bffe50a04e710bc87742de35060580a293c2a984299ed83bc4e4"}, + {file = "websockets-15.0.1-cp313-cp313-win32.whl", hash = "sha256:ba9e56e8ceeeedb2e080147ba85ffcd5cd0711b89576b83784d8605a7df455fa"}, + {file = "websockets-15.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:e09473f095a819042ecb2ab9465aee615bd9c2028e4ef7d933600a8401c79561"}, + {file = "websockets-15.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5f4c04ead5aed67c8a1a20491d54cdfba5884507a48dd798ecaf13c74c4489f5"}, + {file = "websockets-15.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:abdc0c6c8c648b4805c5eacd131910d2a7f6455dfd3becab248ef108e89ab16a"}, + {file = "websockets-15.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a625e06551975f4b7ea7102bc43895b90742746797e2e14b70ed61c43a90f09b"}, + {file = "websockets-15.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d591f8de75824cbb7acad4e05d2d710484f15f29d4a915092675ad3456f11770"}, + {file = "websockets-15.0.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:47819cea040f31d670cc8d324bb6435c6f133b8c7a19ec3d61634e62f8d8f9eb"}, + {file = "websockets-15.0.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac017dd64572e5c3bd01939121e4d16cf30e5d7e110a119399cf3133b63ad054"}, + {file = "websockets-15.0.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:4a9fac8e469d04ce6c25bb2610dc535235bd4aa14996b4e6dbebf5e007eba5ee"}, + {file = "websockets-15.0.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:363c6f671b761efcb30608d24925a382497c12c506b51661883c3e22337265ed"}, + {file = "websockets-15.0.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:2034693ad3097d5355bfdacfffcbd3ef5694f9718ab7f29c29689a9eae841880"}, + {file = "websockets-15.0.1-cp39-cp39-win32.whl", hash = "sha256:3b1ac0d3e594bf121308112697cf4b32be538fb1444468fb0a6ae4feebc83411"}, + {file = "websockets-15.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:b7643a03db5c95c799b89b31c036d5f27eeb4d259c798e878d6937d71832b1e4"}, + {file = "websockets-15.0.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:0c9e74d766f2818bb95f84c25be4dea09841ac0f734d1966f415e4edfc4ef1c3"}, + {file = "websockets-15.0.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1009ee0c7739c08a0cd59de430d6de452a55e42d6b522de7aa15e6f67db0b8e1"}, + {file = "websockets-15.0.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76d1f20b1c7a2fa82367e04982e708723ba0e7b8d43aa643d3dcd404d74f1475"}, + {file = "websockets-15.0.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f29d80eb9a9263b8d109135351caf568cc3f80b9928bccde535c235de55c22d9"}, + {file = "websockets-15.0.1-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:b359ed09954d7c18bbc1680f380c7301f92c60bf924171629c5db97febb12f04"}, + {file = "websockets-15.0.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:cad21560da69f4ce7658ca2cb83138fb4cf695a2ba3e475e0559e05991aa8122"}, + {file = "websockets-15.0.1-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7f493881579c90fc262d9cdbaa05a6b54b3811c2f300766748db79f098db9940"}, + {file = "websockets-15.0.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:47b099e1f4fbc95b701b6e85768e1fcdaf1630f3cbe4765fa216596f12310e2e"}, + {file = "websockets-15.0.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:67f2b6de947f8c757db2db9c71527933ad0019737ec374a8a6be9a956786aaf9"}, + {file = "websockets-15.0.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d08eb4c2b7d6c41da6ca0600c077e93f5adcfd979cd777d747e9ee624556da4b"}, + {file = "websockets-15.0.1-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b826973a4a2ae47ba357e4e82fa44a463b8f168e1ca775ac64521442b19e87f"}, + {file = "websockets-15.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:21c1fa28a6a7e3cbdc171c694398b6df4744613ce9b36b1a498e816787e28123"}, + {file = "websockets-15.0.1-py3-none-any.whl", hash = "sha256:f7a866fbc1e97b5c617ee4116daaa09b722101d4a3c170c787450ba409f9736f"}, + {file = "websockets-15.0.1.tar.gz", hash = "sha256:82544de02076bafba038ce055ee6412d68da13ab47f0c60cab827346de828dee"}, +] + +[[package]] +name = "werkzeug" +version = "3.0.6" +description = "The comprehensive WSGI web application library." +optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "python_version < \"3.10\"" +files = [ + {file = "werkzeug-3.0.6-py3-none-any.whl", hash = "sha256:1bc0c2310d2fbb07b1dd1105eba2f7af72f322e1e455f2f93c993bee8c8a5f17"}, + {file = "werkzeug-3.0.6.tar.gz", hash = "sha256:a8dd59d4de28ca70471a34cba79bed5f7ef2e036a76b3ab0835474246eb41f8d"}, +] + +[package.dependencies] +MarkupSafe = ">=2.1.1" + +[package.extras] +watchdog = ["watchdog (>=2.3)"] + +[[package]] +name = "werkzeug" +version = "3.1.3" +description = "The comprehensive WSGI web application library." +optional = false +python-versions = ">=3.9" +groups = ["main"] +markers = "python_version >= \"3.10\"" +files = [ + {file = "werkzeug-3.1.3-py3-none-any.whl", hash = "sha256:54b78bf3716d19a65be4fceccc0d1d7b89e608834989dfae50ea87564639213e"}, + {file = "werkzeug-3.1.3.tar.gz", hash = "sha256:60723ce945c19328679790e3282cc758aa4a6040e4bb330f53d30fa546d44746"}, +] + +[package.dependencies] +MarkupSafe = ">=2.1.1" + +[package.extras] +watchdog = ["watchdog (>=2.3)"] + +[[package]] +name = "wrapt" +version = "1.17.2" +description = "Module for decorators, wrappers and monkey patching." 
+optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "wrapt-1.17.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3d57c572081fed831ad2d26fd430d565b76aa277ed1d30ff4d40670b1c0dd984"}, + {file = "wrapt-1.17.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b5e251054542ae57ac7f3fba5d10bfff615b6c2fb09abeb37d2f1463f841ae22"}, + {file = "wrapt-1.17.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:80dd7db6a7cb57ffbc279c4394246414ec99537ae81ffd702443335a61dbf3a7"}, + {file = "wrapt-1.17.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a6e821770cf99cc586d33833b2ff32faebdbe886bd6322395606cf55153246c"}, + {file = "wrapt-1.17.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b60fb58b90c6d63779cb0c0c54eeb38941bae3ecf7a73c764c52c88c2dcb9d72"}, + {file = "wrapt-1.17.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b870b5df5b71d8c3359d21be8f0d6c485fa0ebdb6477dda51a1ea54a9b558061"}, + {file = "wrapt-1.17.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:4011d137b9955791f9084749cba9a367c68d50ab8d11d64c50ba1688c9b457f2"}, + {file = "wrapt-1.17.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:1473400e5b2733e58b396a04eb7f35f541e1fb976d0c0724d0223dd607e0f74c"}, + {file = "wrapt-1.17.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3cedbfa9c940fdad3e6e941db7138e26ce8aad38ab5fe9dcfadfed9db7a54e62"}, + {file = "wrapt-1.17.2-cp310-cp310-win32.whl", hash = "sha256:582530701bff1dec6779efa00c516496968edd851fba224fbd86e46cc6b73563"}, + {file = "wrapt-1.17.2-cp310-cp310-win_amd64.whl", hash = "sha256:58705da316756681ad3c9c73fd15499aa4d8c69f9fd38dc8a35e06c12468582f"}, + {file = "wrapt-1.17.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ff04ef6eec3eee8a5efef2401495967a916feaa353643defcc03fc74fe213b58"}, + {file = "wrapt-1.17.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4db983e7bca53819efdbd64590ee96c9213894272c776966ca6306b73e4affda"}, + {file = "wrapt-1.17.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9abc77a4ce4c6f2a3168ff34b1da9b0f311a8f1cfd694ec96b0603dff1c79438"}, + {file = "wrapt-1.17.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b929ac182f5ace000d459c59c2c9c33047e20e935f8e39371fa6e3b85d56f4a"}, + {file = "wrapt-1.17.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f09b286faeff3c750a879d336fb6d8713206fc97af3adc14def0cdd349df6000"}, + {file = "wrapt-1.17.2-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a7ed2d9d039bd41e889f6fb9364554052ca21ce823580f6a07c4ec245c1f5d6"}, + {file = "wrapt-1.17.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:129a150f5c445165ff941fc02ee27df65940fcb8a22a61828b1853c98763a64b"}, + {file = "wrapt-1.17.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:1fb5699e4464afe5c7e65fa51d4f99e0b2eadcc176e4aa33600a3df7801d6662"}, + {file = "wrapt-1.17.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9a2bce789a5ea90e51a02dfcc39e31b7f1e662bc3317979aa7e5538e3a034f72"}, + {file = "wrapt-1.17.2-cp311-cp311-win32.whl", hash = "sha256:4afd5814270fdf6380616b321fd31435a462019d834f83c8611a0ce7484c7317"}, + {file = "wrapt-1.17.2-cp311-cp311-win_amd64.whl", hash = "sha256:acc130bc0375999da18e3d19e5a86403667ac0c4042a094fefb7eec8ebac7cf3"}, + {file = 
"wrapt-1.17.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:d5e2439eecc762cd85e7bd37161d4714aa03a33c5ba884e26c81559817ca0925"}, + {file = "wrapt-1.17.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:3fc7cb4c1c744f8c05cd5f9438a3caa6ab94ce8344e952d7c45a8ed59dd88392"}, + {file = "wrapt-1.17.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8fdbdb757d5390f7c675e558fd3186d590973244fab0c5fe63d373ade3e99d40"}, + {file = "wrapt-1.17.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5bb1d0dbf99411f3d871deb6faa9aabb9d4e744d67dcaaa05399af89d847a91d"}, + {file = "wrapt-1.17.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d18a4865f46b8579d44e4fe1e2bcbc6472ad83d98e22a26c963d46e4c125ef0b"}, + {file = "wrapt-1.17.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc570b5f14a79734437cb7b0500376b6b791153314986074486e0b0fa8d71d98"}, + {file = "wrapt-1.17.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6d9187b01bebc3875bac9b087948a2bccefe464a7d8f627cf6e48b1bbae30f82"}, + {file = "wrapt-1.17.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:9e8659775f1adf02eb1e6f109751268e493c73716ca5761f8acb695e52a756ae"}, + {file = "wrapt-1.17.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e8b2816ebef96d83657b56306152a93909a83f23994f4b30ad4573b00bd11bb9"}, + {file = "wrapt-1.17.2-cp312-cp312-win32.whl", hash = "sha256:468090021f391fe0056ad3e807e3d9034e0fd01adcd3bdfba977b6fdf4213ea9"}, + {file = "wrapt-1.17.2-cp312-cp312-win_amd64.whl", hash = "sha256:ec89ed91f2fa8e3f52ae53cd3cf640d6feff92ba90d62236a81e4e563ac0e991"}, + {file = "wrapt-1.17.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:6ed6ffac43aecfe6d86ec5b74b06a5be33d5bb9243d055141e8cabb12aa08125"}, + {file = "wrapt-1.17.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:35621ae4c00e056adb0009f8e86e28eb4a41a4bfa8f9bfa9fca7d343fe94f998"}, + {file = "wrapt-1.17.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a604bf7a053f8362d27eb9fefd2097f82600b856d5abe996d623babd067b1ab5"}, + {file = "wrapt-1.17.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5cbabee4f083b6b4cd282f5b817a867cf0b1028c54d445b7ec7cfe6505057cf8"}, + {file = "wrapt-1.17.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:49703ce2ddc220df165bd2962f8e03b84c89fee2d65e1c24a7defff6f988f4d6"}, + {file = "wrapt-1.17.2-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8112e52c5822fc4253f3901b676c55ddf288614dc7011634e2719718eaa187dc"}, + {file = "wrapt-1.17.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:9fee687dce376205d9a494e9c121e27183b2a3df18037f89d69bd7b35bcf59e2"}, + {file = "wrapt-1.17.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:18983c537e04d11cf027fbb60a1e8dfd5190e2b60cc27bc0808e653e7b218d1b"}, + {file = "wrapt-1.17.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:703919b1633412ab54bcf920ab388735832fdcb9f9a00ae49387f0fe67dad504"}, + {file = "wrapt-1.17.2-cp313-cp313-win32.whl", hash = "sha256:abbb9e76177c35d4e8568e58650aa6926040d6a9f6f03435b7a522bf1c487f9a"}, + {file = "wrapt-1.17.2-cp313-cp313-win_amd64.whl", hash = "sha256:69606d7bb691b50a4240ce6b22ebb319c1cfb164e5f6569835058196e0f3a845"}, + {file = "wrapt-1.17.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = 
"sha256:4a721d3c943dae44f8e243b380cb645a709ba5bd35d3ad27bc2ed947e9c68192"}, + {file = "wrapt-1.17.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:766d8bbefcb9e00c3ac3b000d9acc51f1b399513f44d77dfe0eb026ad7c9a19b"}, + {file = "wrapt-1.17.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:e496a8ce2c256da1eb98bd15803a79bee00fc351f5dfb9ea82594a3f058309e0"}, + {file = "wrapt-1.17.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d615e4fe22f4ad3528448c193b218e077656ca9ccb22ce2cb20db730f8d306"}, + {file = "wrapt-1.17.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a5aaeff38654462bc4b09023918b7f21790efb807f54c000a39d41d69cf552cb"}, + {file = "wrapt-1.17.2-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a7d15bbd2bc99e92e39f49a04653062ee6085c0e18b3b7512a4f2fe91f2d681"}, + {file = "wrapt-1.17.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:e3890b508a23299083e065f435a492b5435eba6e304a7114d2f919d400888cc6"}, + {file = "wrapt-1.17.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:8c8b293cd65ad716d13d8dd3624e42e5a19cc2a2f1acc74b30c2c13f15cb61a6"}, + {file = "wrapt-1.17.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:4c82b8785d98cdd9fed4cac84d765d234ed3251bd6afe34cb7ac523cb93e8b4f"}, + {file = "wrapt-1.17.2-cp313-cp313t-win32.whl", hash = "sha256:13e6afb7fe71fe7485a4550a8844cc9ffbe263c0f1a1eea569bc7091d4898555"}, + {file = "wrapt-1.17.2-cp313-cp313t-win_amd64.whl", hash = "sha256:eaf675418ed6b3b31c7a989fd007fa7c3be66ce14e5c3b27336383604c9da85c"}, + {file = "wrapt-1.17.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5c803c401ea1c1c18de70a06a6f79fcc9c5acfc79133e9869e730ad7f8ad8ef9"}, + {file = "wrapt-1.17.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f917c1180fdb8623c2b75a99192f4025e412597c50b2ac870f156de8fb101119"}, + {file = "wrapt-1.17.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ecc840861360ba9d176d413a5489b9a0aff6d6303d7e733e2c4623cfa26904a6"}, + {file = "wrapt-1.17.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb87745b2e6dc56361bfde481d5a378dc314b252a98d7dd19a651a3fa58f24a9"}, + {file = "wrapt-1.17.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:58455b79ec2661c3600e65c0a716955adc2410f7383755d537584b0de41b1d8a"}, + {file = "wrapt-1.17.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b4e42a40a5e164cbfdb7b386c966a588b1047558a990981ace551ed7e12ca9c2"}, + {file = "wrapt-1.17.2-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:91bd7d1773e64019f9288b7a5101f3ae50d3d8e6b1de7edee9c2ccc1d32f0c0a"}, + {file = "wrapt-1.17.2-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:bb90fb8bda722a1b9d48ac1e6c38f923ea757b3baf8ebd0c82e09c5c1a0e7a04"}, + {file = "wrapt-1.17.2-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:08e7ce672e35efa54c5024936e559469436f8b8096253404faeb54d2a878416f"}, + {file = "wrapt-1.17.2-cp38-cp38-win32.whl", hash = "sha256:410a92fefd2e0e10d26210e1dfb4a876ddaf8439ef60d6434f21ef8d87efc5b7"}, + {file = "wrapt-1.17.2-cp38-cp38-win_amd64.whl", hash = "sha256:95c658736ec15602da0ed73f312d410117723914a5c91a14ee4cdd72f1d790b3"}, + {file = "wrapt-1.17.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:99039fa9e6306880572915728d7f6c24a86ec57b0a83f6b2491e1d8ab0235b9a"}, + {file = "wrapt-1.17.2-cp39-cp39-macosx_10_9_x86_64.whl", hash 
= "sha256:2696993ee1eebd20b8e4ee4356483c4cb696066ddc24bd70bcbb80fa56ff9061"}, + {file = "wrapt-1.17.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:612dff5db80beef9e649c6d803a8d50c409082f1fedc9dbcdfde2983b2025b82"}, + {file = "wrapt-1.17.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62c2caa1585c82b3f7a7ab56afef7b3602021d6da34fbc1cf234ff139fed3cd9"}, + {file = "wrapt-1.17.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c958bcfd59bacc2d0249dcfe575e71da54f9dcf4a8bdf89c4cb9a68a1170d73f"}, + {file = "wrapt-1.17.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc78a84e2dfbc27afe4b2bd7c80c8db9bca75cc5b85df52bfe634596a1da846b"}, + {file = "wrapt-1.17.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:ba0f0eb61ef00ea10e00eb53a9129501f52385c44853dbd6c4ad3f403603083f"}, + {file = "wrapt-1.17.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:1e1fe0e6ab7775fd842bc39e86f6dcfc4507ab0ffe206093e76d61cde37225c8"}, + {file = "wrapt-1.17.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:c86563182421896d73858e08e1db93afdd2b947a70064b813d515d66549e15f9"}, + {file = "wrapt-1.17.2-cp39-cp39-win32.whl", hash = "sha256:f393cda562f79828f38a819f4788641ac7c4085f30f1ce1a68672baa686482bb"}, + {file = "wrapt-1.17.2-cp39-cp39-win_amd64.whl", hash = "sha256:36ccae62f64235cf8ddb682073a60519426fdd4725524ae38874adf72b5f2aeb"}, + {file = "wrapt-1.17.2-py3-none-any.whl", hash = "sha256:b18f2d1533a71f069c7f82d524a52599053d4c7166e9dd374ae2136b7f40f7c8"}, + {file = "wrapt-1.17.2.tar.gz", hash = "sha256:41388e9d4d1522446fe79d3213196bd9e3b301a336965b9e27ca2788ebd122f3"}, +] + +[[package]] +name = "xxhash" +version = "3.5.0" +description = "Python binding for xxHash" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "xxhash-3.5.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ece616532c499ee9afbb83078b1b952beffef121d989841f7f4b3dc5ac0fd212"}, + {file = "xxhash-3.5.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3171f693dbc2cef6477054a665dc255d996646b4023fe56cb4db80e26f4cc520"}, + {file = "xxhash-3.5.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7c5d3e570ef46adaf93fc81b44aca6002b5a4d8ca11bd0580c07eac537f36680"}, + {file = "xxhash-3.5.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7cb29a034301e2982df8b1fe6328a84f4b676106a13e9135a0d7e0c3e9f806da"}, + {file = "xxhash-3.5.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d0d307d27099bb0cbeea7260eb39ed4fdb99c5542e21e94bb6fd29e49c57a23"}, + {file = "xxhash-3.5.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0342aafd421795d740e514bc9858ebddfc705a75a8c5046ac56d85fe97bf196"}, + {file = "xxhash-3.5.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3dbbd9892c5ebffeca1ed620cf0ade13eb55a0d8c84e0751a6653adc6ac40d0c"}, + {file = "xxhash-3.5.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:4cc2d67fdb4d057730c75a64c5923abfa17775ae234a71b0200346bfb0a7f482"}, + {file = "xxhash-3.5.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:ec28adb204b759306a3d64358a5e5c07d7b1dd0ccbce04aa76cb9377b7b70296"}, + {file = "xxhash-3.5.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:1328f6d8cca2b86acb14104e381225a3d7b42c92c4b86ceae814e5c400dbb415"}, + {file = 
"xxhash-3.5.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:8d47ebd9f5d9607fd039c1fbf4994e3b071ea23eff42f4ecef246ab2b7334198"}, + {file = "xxhash-3.5.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b96d559e0fcddd3343c510a0fe2b127fbff16bf346dd76280b82292567523442"}, + {file = "xxhash-3.5.0-cp310-cp310-win32.whl", hash = "sha256:61c722ed8d49ac9bc26c7071eeaa1f6ff24053d553146d5df031802deffd03da"}, + {file = "xxhash-3.5.0-cp310-cp310-win_amd64.whl", hash = "sha256:9bed5144c6923cc902cd14bb8963f2d5e034def4486ab0bbe1f58f03f042f9a9"}, + {file = "xxhash-3.5.0-cp310-cp310-win_arm64.whl", hash = "sha256:893074d651cf25c1cc14e3bea4fceefd67f2921b1bb8e40fcfeba56820de80c6"}, + {file = "xxhash-3.5.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:02c2e816896dc6f85922ced60097bcf6f008dedfc5073dcba32f9c8dd786f3c1"}, + {file = "xxhash-3.5.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6027dcd885e21581e46d3c7f682cfb2b870942feeed58a21c29583512c3f09f8"}, + {file = "xxhash-3.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1308fa542bbdbf2fa85e9e66b1077eea3a88bef38ee8a06270b4298a7a62a166"}, + {file = "xxhash-3.5.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c28b2fdcee797e1c1961cd3bcd3d545cab22ad202c846235197935e1df2f8ef7"}, + {file = "xxhash-3.5.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:924361811732ddad75ff23e90efd9ccfda4f664132feecb90895bade6a1b4623"}, + {file = "xxhash-3.5.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89997aa1c4b6a5b1e5b588979d1da048a3c6f15e55c11d117a56b75c84531f5a"}, + {file = "xxhash-3.5.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:685c4f4e8c59837de103344eb1c8a3851f670309eb5c361f746805c5471b8c88"}, + {file = "xxhash-3.5.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:dbd2ecfbfee70bc1a4acb7461fa6af7748ec2ab08ac0fa298f281c51518f982c"}, + {file = "xxhash-3.5.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:25b5a51dc3dfb20a10833c8eee25903fd2e14059e9afcd329c9da20609a307b2"}, + {file = "xxhash-3.5.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:a8fb786fb754ef6ff8c120cb96629fb518f8eb5a61a16aac3a979a9dbd40a084"}, + {file = "xxhash-3.5.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:a905ad00ad1e1c34fe4e9d7c1d949ab09c6fa90c919860c1534ff479f40fd12d"}, + {file = "xxhash-3.5.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:963be41bcd49f53af6d795f65c0da9b4cc518c0dd9c47145c98f61cb464f4839"}, + {file = "xxhash-3.5.0-cp311-cp311-win32.whl", hash = "sha256:109b436096d0a2dd039c355fa3414160ec4d843dfecc64a14077332a00aeb7da"}, + {file = "xxhash-3.5.0-cp311-cp311-win_amd64.whl", hash = "sha256:b702f806693201ad6c0a05ddbbe4c8f359626d0b3305f766077d51388a6bac58"}, + {file = "xxhash-3.5.0-cp311-cp311-win_arm64.whl", hash = "sha256:c4dcb4120d0cc3cc448624147dba64e9021b278c63e34a38789b688fd0da9bf3"}, + {file = "xxhash-3.5.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:14470ace8bd3b5d51318782cd94e6f94431974f16cb3b8dc15d52f3b69df8e00"}, + {file = "xxhash-3.5.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:59aa1203de1cb96dbeab595ded0ad0c0056bb2245ae11fac11c0ceea861382b9"}, + {file = "xxhash-3.5.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:08424f6648526076e28fae6ea2806c0a7d504b9ef05ae61d196d571e5c879c84"}, + {file = "xxhash-3.5.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:61a1ff00674879725b194695e17f23d3248998b843eb5e933007ca743310f793"}, + {file = "xxhash-3.5.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2f2c61bee5844d41c3eb015ac652a0229e901074951ae48581d58bfb2ba01be"}, + {file = "xxhash-3.5.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d32a592cac88d18cc09a89172e1c32d7f2a6e516c3dfde1b9adb90ab5df54a6"}, + {file = "xxhash-3.5.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:70dabf941dede727cca579e8c205e61121afc9b28516752fd65724be1355cc90"}, + {file = "xxhash-3.5.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e5d0ddaca65ecca9c10dcf01730165fd858533d0be84c75c327487c37a906a27"}, + {file = "xxhash-3.5.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:3e5b5e16c5a480fe5f59f56c30abdeba09ffd75da8d13f6b9b6fd224d0b4d0a2"}, + {file = "xxhash-3.5.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:149b7914451eb154b3dfaa721315117ea1dac2cc55a01bfbd4df7c68c5dd683d"}, + {file = "xxhash-3.5.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:eade977f5c96c677035ff39c56ac74d851b1cca7d607ab3d8f23c6b859379cab"}, + {file = "xxhash-3.5.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fa9f547bd98f5553d03160967866a71056a60960be00356a15ecc44efb40ba8e"}, + {file = "xxhash-3.5.0-cp312-cp312-win32.whl", hash = "sha256:f7b58d1fd3551b8c80a971199543379be1cee3d0d409e1f6d8b01c1a2eebf1f8"}, + {file = "xxhash-3.5.0-cp312-cp312-win_amd64.whl", hash = "sha256:fa0cafd3a2af231b4e113fba24a65d7922af91aeb23774a8b78228e6cd785e3e"}, + {file = "xxhash-3.5.0-cp312-cp312-win_arm64.whl", hash = "sha256:586886c7e89cb9828bcd8a5686b12e161368e0064d040e225e72607b43858ba2"}, + {file = "xxhash-3.5.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:37889a0d13b0b7d739cfc128b1c902f04e32de17b33d74b637ad42f1c55101f6"}, + {file = "xxhash-3.5.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:97a662338797c660178e682f3bc180277b9569a59abfb5925e8620fba00b9fc5"}, + {file = "xxhash-3.5.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f85e0108d51092bdda90672476c7d909c04ada6923c14ff9d913c4f7dc8a3bc"}, + {file = "xxhash-3.5.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cd2fd827b0ba763ac919440042302315c564fdb797294d86e8cdd4578e3bc7f3"}, + {file = "xxhash-3.5.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:82085c2abec437abebf457c1d12fccb30cc8b3774a0814872511f0f0562c768c"}, + {file = "xxhash-3.5.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:07fda5de378626e502b42b311b049848c2ef38784d0d67b6f30bb5008642f8eb"}, + {file = "xxhash-3.5.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c279f0d2b34ef15f922b77966640ade58b4ccdfef1c4d94b20f2a364617a493f"}, + {file = "xxhash-3.5.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:89e66ceed67b213dec5a773e2f7a9e8c58f64daeb38c7859d8815d2c89f39ad7"}, + {file = "xxhash-3.5.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:bcd51708a633410737111e998ceb3b45d3dbc98c0931f743d9bb0a209033a326"}, + {file = "xxhash-3.5.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:3ff2c0a34eae7df88c868be53a8dd56fbdf592109e21d4bfa092a27b0bf4a7bf"}, + {file = "xxhash-3.5.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:4e28503dccc7d32e0b9817aa0cbfc1f45f563b2c995b7a66c4c8a0d232e840c7"}, + {file = "xxhash-3.5.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = 
"sha256:a6c50017518329ed65a9e4829154626f008916d36295b6a3ba336e2458824c8c"}, + {file = "xxhash-3.5.0-cp313-cp313-win32.whl", hash = "sha256:53a068fe70301ec30d868ece566ac90d873e3bb059cf83c32e76012c889b8637"}, + {file = "xxhash-3.5.0-cp313-cp313-win_amd64.whl", hash = "sha256:80babcc30e7a1a484eab952d76a4f4673ff601f54d5142c26826502740e70b43"}, + {file = "xxhash-3.5.0-cp313-cp313-win_arm64.whl", hash = "sha256:4811336f1ce11cac89dcbd18f3a25c527c16311709a89313c3acaf771def2d4b"}, + {file = "xxhash-3.5.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:6e5f70f6dca1d3b09bccb7daf4e087075ff776e3da9ac870f86ca316736bb4aa"}, + {file = "xxhash-3.5.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e76e83efc7b443052dd1e585a76201e40b3411fe3da7af4fe434ec51b2f163b"}, + {file = "xxhash-3.5.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:33eac61d0796ca0591f94548dcfe37bb193671e0c9bcf065789b5792f2eda644"}, + {file = "xxhash-3.5.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0ec70a89be933ea49222fafc3999987d7899fc676f688dd12252509434636622"}, + {file = "xxhash-3.5.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd86b8e7f703ec6ff4f351cfdb9f428955859537125904aa8c963604f2e9d3e7"}, + {file = "xxhash-3.5.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0adfbd36003d9f86c8c97110039f7539b379f28656a04097e7434d3eaf9aa131"}, + {file = "xxhash-3.5.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:63107013578c8a730419adc05608756c3fa640bdc6abe806c3123a49fb829f43"}, + {file = "xxhash-3.5.0-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:683b94dbd1ca67557850b86423318a2e323511648f9f3f7b1840408a02b9a48c"}, + {file = "xxhash-3.5.0-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:5d2a01dcce81789cf4b12d478b5464632204f4c834dc2d064902ee27d2d1f0ee"}, + {file = "xxhash-3.5.0-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:a9d360a792cbcce2fe7b66b8d51274ec297c53cbc423401480e53b26161a290d"}, + {file = "xxhash-3.5.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:f0b48edbebea1b7421a9c687c304f7b44d0677c46498a046079d445454504737"}, + {file = "xxhash-3.5.0-cp37-cp37m-win32.whl", hash = "sha256:7ccb800c9418e438b44b060a32adeb8393764da7441eb52aa2aa195448935306"}, + {file = "xxhash-3.5.0-cp37-cp37m-win_amd64.whl", hash = "sha256:c3bc7bf8cb8806f8d1c9bf149c18708cb1c406520097d6b0a73977460ea03602"}, + {file = "xxhash-3.5.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:74752ecaa544657d88b1d1c94ae68031e364a4d47005a90288f3bab3da3c970f"}, + {file = "xxhash-3.5.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:dee1316133c9b463aa81aca676bc506d3f80d8f65aeb0bba2b78d0b30c51d7bd"}, + {file = "xxhash-3.5.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:602d339548d35a8579c6b013339fb34aee2df9b4e105f985443d2860e4d7ffaa"}, + {file = "xxhash-3.5.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:695735deeddfb35da1677dbc16a083445360e37ff46d8ac5c6fcd64917ff9ade"}, + {file = "xxhash-3.5.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1030a39ba01b0c519b1a82f80e8802630d16ab95dc3f2b2386a0b5c8ed5cbb10"}, + {file = "xxhash-3.5.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a5bc08f33c4966f4eb6590d6ff3ceae76151ad744576b5fc6c4ba8edd459fdec"}, + {file = "xxhash-3.5.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:160e0c19ee500482ddfb5d5570a0415f565d8ae2b3fd69c5dcfce8a58107b1c3"}, + {file = "xxhash-3.5.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:f1abffa122452481a61c3551ab3c89d72238e279e517705b8b03847b1d93d738"}, + {file = "xxhash-3.5.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:d5e9db7ef3ecbfc0b4733579cea45713a76852b002cf605420b12ef3ef1ec148"}, + {file = "xxhash-3.5.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:23241ff6423378a731d84864bf923a41649dc67b144debd1077f02e6249a0d54"}, + {file = "xxhash-3.5.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:82b833d5563fefd6fceafb1aed2f3f3ebe19f84760fdd289f8b926731c2e6e91"}, + {file = "xxhash-3.5.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0a80ad0ffd78bef9509eee27b4a29e56f5414b87fb01a888353e3d5bda7038bd"}, + {file = "xxhash-3.5.0-cp38-cp38-win32.whl", hash = "sha256:50ac2184ffb1b999e11e27c7e3e70cc1139047e7ebc1aa95ed12f4269abe98d4"}, + {file = "xxhash-3.5.0-cp38-cp38-win_amd64.whl", hash = "sha256:392f52ebbb932db566973693de48f15ce787cabd15cf6334e855ed22ea0be5b3"}, + {file = "xxhash-3.5.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bfc8cdd7f33d57f0468b0614ae634cc38ab9202c6957a60e31d285a71ebe0301"}, + {file = "xxhash-3.5.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e0c48b6300cd0b0106bf49169c3e0536408dfbeb1ccb53180068a18b03c662ab"}, + {file = "xxhash-3.5.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fe1a92cfbaa0a1253e339ccec42dbe6db262615e52df591b68726ab10338003f"}, + {file = "xxhash-3.5.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:33513d6cc3ed3b559134fb307aae9bdd94d7e7c02907b37896a6c45ff9ce51bd"}, + {file = "xxhash-3.5.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eefc37f6138f522e771ac6db71a6d4838ec7933939676f3753eafd7d3f4c40bc"}, + {file = "xxhash-3.5.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a606c8070ada8aa2a88e181773fa1ef17ba65ce5dd168b9d08038e2a61b33754"}, + {file = "xxhash-3.5.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:42eca420c8fa072cc1dd62597635d140e78e384a79bb4944f825fbef8bfeeef6"}, + {file = "xxhash-3.5.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:604253b2143e13218ff1ef0b59ce67f18b8bd1c4205d2ffda22b09b426386898"}, + {file = "xxhash-3.5.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:6e93a5ad22f434d7876665444a97e713a8f60b5b1a3521e8df11b98309bff833"}, + {file = "xxhash-3.5.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:7a46e1d6d2817ba8024de44c4fd79913a90e5f7265434cef97026215b7d30df6"}, + {file = "xxhash-3.5.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:30eb2efe6503c379b7ab99c81ba4a779748e3830241f032ab46bd182bf5873af"}, + {file = "xxhash-3.5.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:c8aa771ff2c13dd9cda8166d685d7333d389fae30a4d2bb39d63ab5775de8606"}, + {file = "xxhash-3.5.0-cp39-cp39-win32.whl", hash = "sha256:5ed9ebc46f24cf91034544b26b131241b699edbfc99ec5e7f8f3d02d6eb7fba4"}, + {file = "xxhash-3.5.0-cp39-cp39-win_amd64.whl", hash = "sha256:220f3f896c6b8d0316f63f16c077d52c412619e475f9372333474ee15133a558"}, + {file = "xxhash-3.5.0-cp39-cp39-win_arm64.whl", hash = "sha256:a7b1d8315d9b5e9f89eb2933b73afae6ec9597a258d52190944437158b49d38e"}, + {file = "xxhash-3.5.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:2014c5b3ff15e64feecb6b713af12093f75b7926049e26a580e94dcad3c73d8c"}, + {file = 
"xxhash-3.5.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fab81ef75003eda96239a23eda4e4543cedc22e34c373edcaf744e721a163986"}, + {file = "xxhash-3.5.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e2febf914ace002132aa09169cc572e0d8959d0f305f93d5828c4836f9bc5a6"}, + {file = "xxhash-3.5.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5d3a10609c51da2a1c0ea0293fc3968ca0a18bd73838455b5bca3069d7f8e32b"}, + {file = "xxhash-3.5.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:5a74f23335b9689b66eb6dbe2a931a88fcd7a4c2cc4b1cb0edba8ce381c7a1da"}, + {file = "xxhash-3.5.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:2b4154c00eb22e4d543f472cfca430e7962a0f1d0f3778334f2e08a7ba59363c"}, + {file = "xxhash-3.5.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d30bbc1644f726b825b3278764240f449d75f1a8bdda892e641d4a688b1494ae"}, + {file = "xxhash-3.5.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fa0b72f2423e2aa53077e54a61c28e181d23effeaafd73fcb9c494e60930c8e"}, + {file = "xxhash-3.5.0-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:13de2b76c1835399b2e419a296d5b38dc4855385d9e96916299170085ef72f57"}, + {file = "xxhash-3.5.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:0691bfcc4f9c656bcb96cc5db94b4d75980b9d5589f2e59de790091028580837"}, + {file = "xxhash-3.5.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:297595fe6138d4da2c8ce9e72a04d73e58725bb60f3a19048bc96ab2ff31c692"}, + {file = "xxhash-3.5.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc1276d369452040cbb943300dc8abeedab14245ea44056a2943183822513a18"}, + {file = "xxhash-3.5.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2061188a1ba352fc699c82bff722f4baacb4b4b8b2f0c745d2001e56d0dfb514"}, + {file = "xxhash-3.5.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:38c384c434021e4f62b8d9ba0bc9467e14d394893077e2c66d826243025e1f81"}, + {file = "xxhash-3.5.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:e6a4dd644d72ab316b580a1c120b375890e4c52ec392d4aef3c63361ec4d77d1"}, + {file = "xxhash-3.5.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:531af8845aaadcadf951b7e0c1345c6b9c68a990eeb74ff9acd8501a0ad6a1c9"}, + {file = "xxhash-3.5.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7ce379bcaa9fcc00f19affa7773084dd09f5b59947b3fb47a1ceb0179f91aaa1"}, + {file = "xxhash-3.5.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd1b2281d01723f076df3c8188f43f2472248a6b63118b036e641243656b1b0f"}, + {file = "xxhash-3.5.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9c770750cc80e8694492244bca7251385188bc5597b6a39d98a9f30e8da984e0"}, + {file = "xxhash-3.5.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:b150b8467852e1bd844387459aa6fbe11d7f38b56e901f9f3b3e6aba0d660240"}, + {file = "xxhash-3.5.0.tar.gz", hash = "sha256:84f2caddf951c9cbf8dc2e22a89d4ccf5d86391ac6418fe81e3c67d0cf60b45f"}, +] + +[[package]] +name = "yapf" +version = "0.43.0" +description = "A formatter for Python code" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "yapf-0.43.0-py3-none-any.whl", hash = 
"sha256:224faffbc39c428cb095818cf6ef5511fdab6f7430a10783fdfb292ccf2852ca"}, + {file = "yapf-0.43.0.tar.gz", hash = "sha256:00d3aa24bfedff9420b2e0d5d9f5ab6d9d4268e72afbf59bb3fa542781d5218e"}, +] + +[package.dependencies] +platformdirs = ">=3.5.1" +tomli = {version = ">=2.0.1", markers = "python_version < \"3.11\""} + +[[package]] +name = "yarl" +version = "1.15.2" +description = "Yet another URL library" +optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "python_version < \"3.10\"" +files = [ + {file = "yarl-1.15.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e4ee8b8639070ff246ad3649294336b06db37a94bdea0d09ea491603e0be73b8"}, + {file = "yarl-1.15.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a7cf963a357c5f00cb55b1955df8bbe68d2f2f65de065160a1c26b85a1e44172"}, + {file = "yarl-1.15.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:43ebdcc120e2ca679dba01a779333a8ea76b50547b55e812b8b92818d604662c"}, + {file = "yarl-1.15.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3433da95b51a75692dcf6cc8117a31410447c75a9a8187888f02ad45c0a86c50"}, + {file = "yarl-1.15.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:38d0124fa992dbacd0c48b1b755d3ee0a9f924f427f95b0ef376556a24debf01"}, + {file = "yarl-1.15.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ded1b1803151dd0f20a8945508786d57c2f97a50289b16f2629f85433e546d47"}, + {file = "yarl-1.15.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ace4cad790f3bf872c082366c9edd7f8f8f77afe3992b134cfc810332206884f"}, + {file = "yarl-1.15.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c77494a2f2282d9bbbbcab7c227a4d1b4bb829875c96251f66fb5f3bae4fb053"}, + {file = "yarl-1.15.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:b7f227ca6db5a9fda0a2b935a2ea34a7267589ffc63c8045f0e4edb8d8dcf956"}, + {file = "yarl-1.15.2-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:31561a5b4d8dbef1559b3600b045607cf804bae040f64b5f5bca77da38084a8a"}, + {file = "yarl-1.15.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3e52474256a7db9dcf3c5f4ca0b300fdea6c21cca0148c8891d03a025649d935"}, + {file = "yarl-1.15.2-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:0e1af74a9529a1137c67c887ed9cde62cff53aa4d84a3adbec329f9ec47a3936"}, + {file = "yarl-1.15.2-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:15c87339490100c63472a76d87fe7097a0835c705eb5ae79fd96e343473629ed"}, + {file = "yarl-1.15.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:74abb8709ea54cc483c4fb57fb17bb66f8e0f04438cff6ded322074dbd17c7ec"}, + {file = "yarl-1.15.2-cp310-cp310-win32.whl", hash = "sha256:ffd591e22b22f9cb48e472529db6a47203c41c2c5911ff0a52e85723196c0d75"}, + {file = "yarl-1.15.2-cp310-cp310-win_amd64.whl", hash = "sha256:1695497bb2a02a6de60064c9f077a4ae9c25c73624e0d43e3aa9d16d983073c2"}, + {file = "yarl-1.15.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9fcda20b2de7042cc35cf911702fa3d8311bd40055a14446c1e62403684afdc5"}, + {file = "yarl-1.15.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0545de8c688fbbf3088f9e8b801157923be4bf8e7b03e97c2ecd4dfa39e48e0e"}, + {file = "yarl-1.15.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:fbda058a9a68bec347962595f50546a8a4a34fd7b0654a7b9697917dc2bf810d"}, + {file = "yarl-1.15.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1ac2bc069f4a458634c26b101c2341b18da85cb96afe0015990507efec2e417"}, 
+ {file = "yarl-1.15.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cd126498171f752dd85737ab1544329a4520c53eed3997f9b08aefbafb1cc53b"}, + {file = "yarl-1.15.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3db817b4e95eb05c362e3b45dafe7144b18603e1211f4a5b36eb9522ecc62bcf"}, + {file = "yarl-1.15.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:076b1ed2ac819933895b1a000904f62d615fe4533a5cf3e052ff9a1da560575c"}, + {file = "yarl-1.15.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f8cfd847e6b9ecf9f2f2531c8427035f291ec286c0a4944b0a9fce58c6446046"}, + {file = "yarl-1.15.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:32b66be100ac5739065496c74c4b7f3015cef792c3174982809274d7e51b3e04"}, + {file = "yarl-1.15.2-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:34a2d76a1984cac04ff8b1bfc939ec9dc0914821264d4a9c8fd0ed6aa8d4cfd2"}, + {file = "yarl-1.15.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:0afad2cd484908f472c8fe2e8ef499facee54a0a6978be0e0cff67b1254fd747"}, + {file = "yarl-1.15.2-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:c68e820879ff39992c7f148113b46efcd6ec765a4865581f2902b3c43a5f4bbb"}, + {file = "yarl-1.15.2-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:98f68df80ec6ca3015186b2677c208c096d646ef37bbf8b49764ab4a38183931"}, + {file = "yarl-1.15.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:3c56ec1eacd0a5d35b8a29f468659c47f4fe61b2cab948ca756c39b7617f0aa5"}, + {file = "yarl-1.15.2-cp311-cp311-win32.whl", hash = "sha256:eedc3f247ee7b3808ea07205f3e7d7879bc19ad3e6222195cd5fbf9988853e4d"}, + {file = "yarl-1.15.2-cp311-cp311-win_amd64.whl", hash = "sha256:0ccaa1bc98751fbfcf53dc8dfdb90d96e98838010fc254180dd6707a6e8bb179"}, + {file = "yarl-1.15.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:82d5161e8cb8f36ec778fd7ac4d740415d84030f5b9ef8fe4da54784a1f46c94"}, + {file = "yarl-1.15.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:fa2bea05ff0a8fb4d8124498e00e02398f06d23cdadd0fe027d84a3f7afde31e"}, + {file = "yarl-1.15.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:99e12d2bf587b44deb74e0d6170fec37adb489964dbca656ec41a7cd8f2ff178"}, + {file = "yarl-1.15.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:243fbbbf003754fe41b5bdf10ce1e7f80bcc70732b5b54222c124d6b4c2ab31c"}, + {file = "yarl-1.15.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:856b7f1a7b98a8c31823285786bd566cf06226ac4f38b3ef462f593c608a9bd6"}, + {file = "yarl-1.15.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:553dad9af802a9ad1a6525e7528152a015b85fb8dbf764ebfc755c695f488367"}, + {file = "yarl-1.15.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:30c3ff305f6e06650a761c4393666f77384f1cc6c5c0251965d6bfa5fbc88f7f"}, + {file = "yarl-1.15.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:353665775be69bbfc6d54c8d134bfc533e332149faeddd631b0bc79df0897f46"}, + {file = "yarl-1.15.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:f4fe99ce44128c71233d0d72152db31ca119711dfc5f2c82385ad611d8d7f897"}, + {file = "yarl-1.15.2-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:9c1e3ff4b89cdd2e1a24c214f141e848b9e0451f08d7d4963cb4108d4d798f1f"}, + {file = "yarl-1.15.2-cp312-cp312-musllinux_1_2_i686.whl", hash = 
"sha256:711bdfae4e699a6d4f371137cbe9e740dc958530cb920eb6f43ff9551e17cfbc"}, + {file = "yarl-1.15.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:4388c72174868884f76affcdd3656544c426407e0043c89b684d22fb265e04a5"}, + {file = "yarl-1.15.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:f0e1844ad47c7bd5d6fa784f1d4accc5f4168b48999303a868fe0f8597bde715"}, + {file = "yarl-1.15.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a5cafb02cf097a82d74403f7e0b6b9df3ffbfe8edf9415ea816314711764a27b"}, + {file = "yarl-1.15.2-cp312-cp312-win32.whl", hash = "sha256:156ececdf636143f508770bf8a3a0498de64da5abd890c7dbb42ca9e3b6c05b8"}, + {file = "yarl-1.15.2-cp312-cp312-win_amd64.whl", hash = "sha256:435aca062444a7f0c884861d2e3ea79883bd1cd19d0a381928b69ae1b85bc51d"}, + {file = "yarl-1.15.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:416f2e3beaeae81e2f7a45dc711258be5bdc79c940a9a270b266c0bec038fb84"}, + {file = "yarl-1.15.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:173563f3696124372831007e3d4b9821746964a95968628f7075d9231ac6bb33"}, + {file = "yarl-1.15.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9ce2e0f6123a60bd1a7f5ae3b2c49b240c12c132847f17aa990b841a417598a2"}, + {file = "yarl-1.15.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eaea112aed589131f73d50d570a6864728bd7c0c66ef6c9154ed7b59f24da611"}, + {file = "yarl-1.15.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e4ca3b9f370f218cc2a0309542cab8d0acdfd66667e7c37d04d617012485f904"}, + {file = "yarl-1.15.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:23ec1d3c31882b2a8a69c801ef58ebf7bae2553211ebbddf04235be275a38548"}, + {file = "yarl-1.15.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75119badf45f7183e10e348edff5a76a94dc19ba9287d94001ff05e81475967b"}, + {file = "yarl-1.15.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:78e6fdc976ec966b99e4daa3812fac0274cc28cd2b24b0d92462e2e5ef90d368"}, + {file = "yarl-1.15.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:8657d3f37f781d987037f9cc20bbc8b40425fa14380c87da0cb8dfce7c92d0fb"}, + {file = "yarl-1.15.2-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:93bed8a8084544c6efe8856c362af08a23e959340c87a95687fdbe9c9f280c8b"}, + {file = "yarl-1.15.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:69d5856d526802cbda768d3e6246cd0d77450fa2a4bc2ea0ea14f0d972c2894b"}, + {file = "yarl-1.15.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:ccad2800dfdff34392448c4bf834be124f10a5bc102f254521d931c1c53c455a"}, + {file = "yarl-1.15.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:a880372e2e5dbb9258a4e8ff43f13888039abb9dd6d515f28611c54361bc5644"}, + {file = "yarl-1.15.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:c998d0558805860503bc3a595994895ca0f7835e00668dadc673bbf7f5fbfcbe"}, + {file = "yarl-1.15.2-cp313-cp313-win32.whl", hash = "sha256:533a28754e7f7439f217550a497bb026c54072dbe16402b183fdbca2431935a9"}, + {file = "yarl-1.15.2-cp313-cp313-win_amd64.whl", hash = "sha256:5838f2b79dc8f96fdc44077c9e4e2e33d7089b10788464609df788eb97d03aad"}, + {file = "yarl-1.15.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:fbbb63bed5fcd70cd3dd23a087cd78e4675fb5a2963b8af53f945cbbca79ae16"}, + {file = "yarl-1.15.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e2e93b88ecc8f74074012e18d679fb2e9c746f2a56f79cd5e2b1afcf2a8a786b"}, + {file = "yarl-1.15.2-cp38-cp38-macosx_11_0_arm64.whl", 
hash = "sha256:af8ff8d7dc07ce873f643de6dfbcd45dc3db2c87462e5c387267197f59e6d776"}, + {file = "yarl-1.15.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:66f629632220a4e7858b58e4857927dd01a850a4cef2fb4044c8662787165cf7"}, + {file = "yarl-1.15.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:833547179c31f9bec39b49601d282d6f0ea1633620701288934c5f66d88c3e50"}, + {file = "yarl-1.15.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2aa738e0282be54eede1e3f36b81f1e46aee7ec7602aa563e81e0e8d7b67963f"}, + {file = "yarl-1.15.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a13a07532e8e1c4a5a3afff0ca4553da23409fad65def1b71186fb867eeae8d"}, + {file = "yarl-1.15.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c45817e3e6972109d1a2c65091504a537e257bc3c885b4e78a95baa96df6a3f8"}, + {file = "yarl-1.15.2-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:670eb11325ed3a6209339974b276811867defe52f4188fe18dc49855774fa9cf"}, + {file = "yarl-1.15.2-cp38-cp38-musllinux_1_2_armv7l.whl", hash = "sha256:d417a4f6943112fae3924bae2af7112562285848d9bcee737fc4ff7cbd450e6c"}, + {file = "yarl-1.15.2-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:bc8936d06cd53fddd4892677d65e98af514c8d78c79864f418bbf78a4a2edde4"}, + {file = "yarl-1.15.2-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:954dde77c404084c2544e572f342aef384240b3e434e06cecc71597e95fd1ce7"}, + {file = "yarl-1.15.2-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:5bc0df728e4def5e15a754521e8882ba5a5121bd6b5a3a0ff7efda5d6558ab3d"}, + {file = "yarl-1.15.2-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:b71862a652f50babab4a43a487f157d26b464b1dedbcc0afda02fd64f3809d04"}, + {file = "yarl-1.15.2-cp38-cp38-win32.whl", hash = "sha256:63eab904f8630aed5a68f2d0aeab565dcfc595dc1bf0b91b71d9ddd43dea3aea"}, + {file = "yarl-1.15.2-cp38-cp38-win_amd64.whl", hash = "sha256:2cf441c4b6e538ba0d2591574f95d3fdd33f1efafa864faa077d9636ecc0c4e9"}, + {file = "yarl-1.15.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:a32d58f4b521bb98b2c0aa9da407f8bd57ca81f34362bcb090e4a79e9924fefc"}, + {file = "yarl-1.15.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:766dcc00b943c089349d4060b935c76281f6be225e39994c2ccec3a2a36ad627"}, + {file = "yarl-1.15.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:bed1b5dbf90bad3bfc19439258c97873eab453c71d8b6869c136346acfe497e7"}, + {file = "yarl-1.15.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed20a4bdc635f36cb19e630bfc644181dd075839b6fc84cac51c0f381ac472e2"}, + {file = "yarl-1.15.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d538df442c0d9665664ab6dd5fccd0110fa3b364914f9c85b3ef9b7b2e157980"}, + {file = "yarl-1.15.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c6cf1d92edf936ceedc7afa61b07e9d78a27b15244aa46bbcd534c7458ee1b"}, + {file = "yarl-1.15.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce44217ad99ffad8027d2fde0269ae368c86db66ea0571c62a000798d69401fb"}, + {file = "yarl-1.15.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b47a6000a7e833ebfe5886b56a31cb2ff12120b1efd4578a6fcc38df16cc77bd"}, + {file = "yarl-1.15.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:e52f77a0cd246086afde8815039f3e16f8d2be51786c0a39b57104c563c5cbb0"}, + {file = "yarl-1.15.2-cp39-cp39-musllinux_1_2_armv7l.whl", hash = 
"sha256:f9ca0e6ce7774dc7830dc0cc4bb6b3eec769db667f230e7c770a628c1aa5681b"}, + {file = "yarl-1.15.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:136f9db0f53c0206db38b8cd0c985c78ded5fd596c9a86ce5c0b92afb91c3a19"}, + {file = "yarl-1.15.2-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:173866d9f7409c0fb514cf6e78952e65816600cb888c68b37b41147349fe0057"}, + {file = "yarl-1.15.2-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:6e840553c9c494a35e449a987ca2c4f8372668ee954a03a9a9685075228e5036"}, + {file = "yarl-1.15.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:458c0c65802d816a6b955cf3603186de79e8fdb46d4f19abaec4ef0a906f50a7"}, + {file = "yarl-1.15.2-cp39-cp39-win32.whl", hash = "sha256:5b48388ded01f6f2429a8c55012bdbd1c2a0c3735b3e73e221649e524c34a58d"}, + {file = "yarl-1.15.2-cp39-cp39-win_amd64.whl", hash = "sha256:81dadafb3aa124f86dc267a2168f71bbd2bfb163663661ab0038f6e4b8edb810"}, + {file = "yarl-1.15.2-py3-none-any.whl", hash = "sha256:0d3105efab7c5c091609abacad33afff33bdff0035bece164c98bcf5a85ef90a"}, + {file = "yarl-1.15.2.tar.gz", hash = "sha256:a39c36f4218a5bb668b4f06874d676d35a035ee668e6e7e3538835c703634b84"}, +] + +[package.dependencies] +idna = ">=2.0" +multidict = ">=4.0" +propcache = ">=0.2.0" + +[[package]] +name = "yarl" +version = "1.20.1" +description = "Yet another URL library" +optional = false +python-versions = ">=3.9" +groups = ["main"] +markers = "python_version >= \"3.10\"" +files = [ + {file = "yarl-1.20.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:6032e6da6abd41e4acda34d75a816012717000fa6839f37124a47fcefc49bec4"}, + {file = "yarl-1.20.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2c7b34d804b8cf9b214f05015c4fee2ebe7ed05cf581e7192c06555c71f4446a"}, + {file = "yarl-1.20.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0c869f2651cc77465f6cd01d938d91a11d9ea5d798738c1dc077f3de0b5e5fed"}, + {file = "yarl-1.20.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62915e6688eb4d180d93840cda4110995ad50c459bf931b8b3775b37c264af1e"}, + {file = "yarl-1.20.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:41ebd28167bc6af8abb97fec1a399f412eec5fd61a3ccbe2305a18b84fb4ca73"}, + {file = "yarl-1.20.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:21242b4288a6d56f04ea193adde174b7e347ac46ce6bc84989ff7c1b1ecea84e"}, + {file = "yarl-1.20.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bea21cdae6c7eb02ba02a475f37463abfe0a01f5d7200121b03e605d6a0439f8"}, + {file = "yarl-1.20.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f8a891e4a22a89f5dde7862994485e19db246b70bb288d3ce73a34422e55b23"}, + {file = "yarl-1.20.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dd803820d44c8853a109a34e3660e5a61beae12970da479cf44aa2954019bf70"}, + {file = "yarl-1.20.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:b982fa7f74c80d5c0c7b5b38f908971e513380a10fecea528091405f519b9ebb"}, + {file = "yarl-1.20.1-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:33f29ecfe0330c570d997bcf1afd304377f2e48f61447f37e846a6058a4d33b2"}, + {file = "yarl-1.20.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:835ab2cfc74d5eb4a6a528c57f05688099da41cf4957cf08cad38647e4a83b30"}, + {file = "yarl-1.20.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:46b5e0ccf1943a9a6e766b2c2b8c732c55b34e28be57d8daa2b3c1d1d4009309"}, + {file = 
"yarl-1.20.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:df47c55f7d74127d1b11251fe6397d84afdde0d53b90bedb46a23c0e534f9d24"}, + {file = "yarl-1.20.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:76d12524d05841276b0e22573f28d5fbcb67589836772ae9244d90dd7d66aa13"}, + {file = "yarl-1.20.1-cp310-cp310-win32.whl", hash = "sha256:6c4fbf6b02d70e512d7ade4b1f998f237137f1417ab07ec06358ea04f69134f8"}, + {file = "yarl-1.20.1-cp310-cp310-win_amd64.whl", hash = "sha256:aef6c4d69554d44b7f9d923245f8ad9a707d971e6209d51279196d8e8fe1ae16"}, + {file = "yarl-1.20.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:47ee6188fea634bdfaeb2cc420f5b3b17332e6225ce88149a17c413c77ff269e"}, + {file = "yarl-1.20.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d0f6500f69e8402d513e5eedb77a4e1818691e8f45e6b687147963514d84b44b"}, + {file = "yarl-1.20.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7a8900a42fcdaad568de58887c7b2f602962356908eedb7628eaf6021a6e435b"}, + {file = "yarl-1.20.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bad6d131fda8ef508b36be3ece16d0902e80b88ea7200f030a0f6c11d9e508d4"}, + {file = "yarl-1.20.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:df018d92fe22aaebb679a7f89fe0c0f368ec497e3dda6cb81a567610f04501f1"}, + {file = "yarl-1.20.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8f969afbb0a9b63c18d0feecf0db09d164b7a44a053e78a7d05f5df163e43833"}, + {file = "yarl-1.20.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:812303eb4aa98e302886ccda58d6b099e3576b1b9276161469c25803a8db277d"}, + {file = "yarl-1.20.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98c4a7d166635147924aa0bf9bfe8d8abad6fffa6102de9c99ea04a1376f91e8"}, + {file = "yarl-1.20.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:12e768f966538e81e6e7550f9086a6236b16e26cd964cf4df35349970f3551cf"}, + {file = "yarl-1.20.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:fe41919b9d899661c5c28a8b4b0acf704510b88f27f0934ac7a7bebdd8938d5e"}, + {file = "yarl-1.20.1-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:8601bc010d1d7780592f3fc1bdc6c72e2b6466ea34569778422943e1a1f3c389"}, + {file = "yarl-1.20.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:daadbdc1f2a9033a2399c42646fbd46da7992e868a5fe9513860122d7fe7a73f"}, + {file = "yarl-1.20.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:03aa1e041727cb438ca762628109ef1333498b122e4c76dd858d186a37cec845"}, + {file = "yarl-1.20.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:642980ef5e0fa1de5fa96d905c7e00cb2c47cb468bfcac5a18c58e27dbf8d8d1"}, + {file = "yarl-1.20.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:86971e2795584fe8c002356d3b97ef6c61862720eeff03db2a7c86b678d85b3e"}, + {file = "yarl-1.20.1-cp311-cp311-win32.whl", hash = "sha256:597f40615b8d25812f14562699e287f0dcc035d25eb74da72cae043bb884d773"}, + {file = "yarl-1.20.1-cp311-cp311-win_amd64.whl", hash = "sha256:26ef53a9e726e61e9cd1cda6b478f17e350fb5800b4bd1cd9fe81c4d91cfeb2e"}, + {file = "yarl-1.20.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:bdcc4cd244e58593a4379fe60fdee5ac0331f8eb70320a24d591a3be197b94a9"}, + {file = "yarl-1.20.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b29a2c385a5f5b9c7d9347e5812b6f7ab267193c62d282a540b4fc528c8a9d2a"}, + {file = "yarl-1.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:1112ae8154186dfe2de4732197f59c05a83dc814849a5ced892b708033f40dc2"}, + {file = "yarl-1.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:90bbd29c4fe234233f7fa2b9b121fb63c321830e5d05b45153a2ca68f7d310ee"}, + {file = "yarl-1.20.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:680e19c7ce3710ac4cd964e90dad99bf9b5029372ba0c7cbfcd55e54d90ea819"}, + {file = "yarl-1.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4a979218c1fdb4246a05efc2cc23859d47c89af463a90b99b7c56094daf25a16"}, + {file = "yarl-1.20.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:255b468adf57b4a7b65d8aad5b5138dce6a0752c139965711bdcb81bc370e1b6"}, + {file = "yarl-1.20.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a97d67108e79cfe22e2b430d80d7571ae57d19f17cda8bb967057ca8a7bf5bfd"}, + {file = "yarl-1.20.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8570d998db4ddbfb9a590b185a0a33dbf8aafb831d07a5257b4ec9948df9cb0a"}, + {file = "yarl-1.20.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:97c75596019baae7c71ccf1d8cc4738bc08134060d0adfcbe5642f778d1dca38"}, + {file = "yarl-1.20.1-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:1c48912653e63aef91ff988c5432832692ac5a1d8f0fb8a33091520b5bbe19ef"}, + {file = "yarl-1.20.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:4c3ae28f3ae1563c50f3d37f064ddb1511ecc1d5584e88c6b7c63cf7702a6d5f"}, + {file = "yarl-1.20.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c5e9642f27036283550f5f57dc6156c51084b458570b9d0d96100c8bebb186a8"}, + {file = "yarl-1.20.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:2c26b0c49220d5799f7b22c6838409ee9bc58ee5c95361a4d7831f03cc225b5a"}, + {file = "yarl-1.20.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:564ab3d517e3d01c408c67f2e5247aad4019dcf1969982aba3974b4093279004"}, + {file = "yarl-1.20.1-cp312-cp312-win32.whl", hash = "sha256:daea0d313868da1cf2fac6b2d3a25c6e3a9e879483244be38c8e6a41f1d876a5"}, + {file = "yarl-1.20.1-cp312-cp312-win_amd64.whl", hash = "sha256:48ea7d7f9be0487339828a4de0360d7ce0efc06524a48e1810f945c45b813698"}, + {file = "yarl-1.20.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:0b5ff0fbb7c9f1b1b5ab53330acbfc5247893069e7716840c8e7d5bb7355038a"}, + {file = "yarl-1.20.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:14f326acd845c2b2e2eb38fb1346c94f7f3b01a4f5c788f8144f9b630bfff9a3"}, + {file = "yarl-1.20.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f60e4ad5db23f0b96e49c018596707c3ae89f5d0bd97f0ad3684bcbad899f1e7"}, + {file = "yarl-1.20.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:49bdd1b8e00ce57e68ba51916e4bb04461746e794e7c4d4bbc42ba2f18297691"}, + {file = "yarl-1.20.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:66252d780b45189975abfed839616e8fd2dbacbdc262105ad7742c6ae58f3e31"}, + {file = "yarl-1.20.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:59174e7332f5d153d8f7452a102b103e2e74035ad085f404df2e40e663a22b28"}, + {file = "yarl-1.20.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e3968ec7d92a0c0f9ac34d5ecfd03869ec0cab0697c91a45db3fbbd95fe1b653"}, + {file = "yarl-1.20.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:d1a4fbb50e14396ba3d375f68bfe02215d8e7bc3ec49da8341fe3157f59d2ff5"}, + {file = "yarl-1.20.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:11a62c839c3a8eac2410e951301309426f368388ff2f33799052787035793b02"}, + {file = "yarl-1.20.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:041eaa14f73ff5a8986b4388ac6bb43a77f2ea09bf1913df7a35d4646db69e53"}, + {file = "yarl-1.20.1-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:377fae2fef158e8fd9d60b4c8751387b8d1fb121d3d0b8e9b0be07d1b41e83dc"}, + {file = "yarl-1.20.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:1c92f4390e407513f619d49319023664643d3339bd5e5a56a3bebe01bc67ec04"}, + {file = "yarl-1.20.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:d25ddcf954df1754ab0f86bb696af765c5bfaba39b74095f27eececa049ef9a4"}, + {file = "yarl-1.20.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:909313577e9619dcff8c31a0ea2aa0a2a828341d92673015456b3ae492e7317b"}, + {file = "yarl-1.20.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:793fd0580cb9664548c6b83c63b43c477212c0260891ddf86809e1c06c8b08f1"}, + {file = "yarl-1.20.1-cp313-cp313-win32.whl", hash = "sha256:468f6e40285de5a5b3c44981ca3a319a4b208ccc07d526b20b12aeedcfa654b7"}, + {file = "yarl-1.20.1-cp313-cp313-win_amd64.whl", hash = "sha256:495b4ef2fea40596bfc0affe3837411d6aa3371abcf31aac0ccc4bdd64d4ef5c"}, + {file = "yarl-1.20.1-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:f60233b98423aab21d249a30eb27c389c14929f47be8430efa7dbd91493a729d"}, + {file = "yarl-1.20.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:6f3eff4cc3f03d650d8755c6eefc844edde99d641d0dcf4da3ab27141a5f8ddf"}, + {file = "yarl-1.20.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:69ff8439d8ba832d6bed88af2c2b3445977eba9a4588b787b32945871c2444e3"}, + {file = "yarl-1.20.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3cf34efa60eb81dd2645a2e13e00bb98b76c35ab5061a3989c7a70f78c85006d"}, + {file = "yarl-1.20.1-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:8e0fe9364ad0fddab2688ce72cb7a8e61ea42eff3c7caeeb83874a5d479c896c"}, + {file = "yarl-1.20.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8f64fbf81878ba914562c672024089e3401974a39767747691c65080a67b18c1"}, + {file = "yarl-1.20.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f6342d643bf9a1de97e512e45e4b9560a043347e779a173250824f8b254bd5ce"}, + {file = "yarl-1.20.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:56dac5f452ed25eef0f6e3c6a066c6ab68971d96a9fb441791cad0efba6140d3"}, + {file = "yarl-1.20.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7d7f497126d65e2cad8dc5f97d34c27b19199b6414a40cb36b52f41b79014be"}, + {file = "yarl-1.20.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:67e708dfb8e78d8a19169818eeb5c7a80717562de9051bf2413aca8e3696bf16"}, + {file = "yarl-1.20.1-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:595c07bc79af2494365cc96ddeb772f76272364ef7c80fb892ef9d0649586513"}, + {file = "yarl-1.20.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:7bdd2f80f4a7df852ab9ab49484a4dee8030023aa536df41f2d922fd57bf023f"}, + {file = "yarl-1.20.1-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:c03bfebc4ae8d862f853a9757199677ab74ec25424d0ebd68a0027e9c639a390"}, + {file = "yarl-1.20.1-cp313-cp313t-musllinux_1_2_s390x.whl", 
hash = "sha256:344d1103e9c1523f32a5ed704d576172d2cabed3122ea90b1d4e11fe17c66458"}, + {file = "yarl-1.20.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:88cab98aa4e13e1ade8c141daeedd300a4603b7132819c484841bb7af3edce9e"}, + {file = "yarl-1.20.1-cp313-cp313t-win32.whl", hash = "sha256:b121ff6a7cbd4abc28985b6028235491941b9fe8fe226e6fdc539c977ea1739d"}, + {file = "yarl-1.20.1-cp313-cp313t-win_amd64.whl", hash = "sha256:541d050a355bbbc27e55d906bc91cb6fe42f96c01413dd0f4ed5a5240513874f"}, + {file = "yarl-1.20.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:e42ba79e2efb6845ebab49c7bf20306c4edf74a0b20fc6b2ccdd1a219d12fad3"}, + {file = "yarl-1.20.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:41493b9b7c312ac448b7f0a42a089dffe1d6e6e981a2d76205801a023ed26a2b"}, + {file = "yarl-1.20.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f5a5928ff5eb13408c62a968ac90d43f8322fd56d87008b8f9dabf3c0f6ee983"}, + {file = "yarl-1.20.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:30c41ad5d717b3961b2dd785593b67d386b73feca30522048d37298fee981805"}, + {file = "yarl-1.20.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:59febc3969b0781682b469d4aca1a5cab7505a4f7b85acf6db01fa500fa3f6ba"}, + {file = "yarl-1.20.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d2b6fb3622b7e5bf7a6e5b679a69326b4279e805ed1699d749739a61d242449e"}, + {file = "yarl-1.20.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:749d73611db8d26a6281086f859ea7ec08f9c4c56cec864e52028c8b328db723"}, + {file = "yarl-1.20.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9427925776096e664c39e131447aa20ec738bdd77c049c48ea5200db2237e000"}, + {file = "yarl-1.20.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ff70f32aa316393eaf8222d518ce9118148eddb8a53073c2403863b41033eed5"}, + {file = "yarl-1.20.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:c7ddf7a09f38667aea38801da8b8d6bfe81df767d9dfc8c88eb45827b195cd1c"}, + {file = "yarl-1.20.1-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:57edc88517d7fc62b174fcfb2e939fbc486a68315d648d7e74d07fac42cec240"}, + {file = "yarl-1.20.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:dab096ce479d5894d62c26ff4f699ec9072269d514b4edd630a393223f45a0ee"}, + {file = "yarl-1.20.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:14a85f3bd2d7bb255be7183e5d7d6e70add151a98edf56a770d6140f5d5f4010"}, + {file = "yarl-1.20.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:2c89b5c792685dd9cd3fa9761c1b9f46fc240c2a3265483acc1565769996a3f8"}, + {file = "yarl-1.20.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:69e9b141de5511021942a6866990aea6d111c9042235de90e08f94cf972ca03d"}, + {file = "yarl-1.20.1-cp39-cp39-win32.whl", hash = "sha256:b5f307337819cdfdbb40193cad84978a029f847b0a357fbe49f712063cfc4f06"}, + {file = "yarl-1.20.1-cp39-cp39-win_amd64.whl", hash = "sha256:eae7bfe2069f9c1c5b05fc7fe5d612e5bbc089a39309904ee8b829e322dcad00"}, + {file = "yarl-1.20.1-py3-none-any.whl", hash = "sha256:83b8eb083fe4683c6115795d9fc1cfaf2cbbefb19b3a1cb68f6527460f483a77"}, + {file = "yarl-1.20.1.tar.gz", hash = "sha256:d017a4997ee50c91fd5466cef416231bb82177b93b029906cefc542ce14c35ac"}, +] + +[package.dependencies] +idna = ">=2.0" +multidict = ">=4.0" +propcache = ">=0.2.1" + +[[package]] +name = "zipp" +version = "3.20.2" +description = "Backport of pathlib-compatible object wrapper for zip files" +optional = 
false +python-versions = ">=3.8" +groups = ["main"] +markers = "python_version < \"3.10\"" +files = [ + {file = "zipp-3.20.2-py3-none-any.whl", hash = "sha256:a817ac80d6cf4b23bf7f2828b7cabf326f15a001bea8b1f9b49631780ba28350"}, + {file = "zipp-3.20.2.tar.gz", hash = "sha256:bc9eb26f4506fda01b81bcde0ca78103b6e62f991b381fec825435c836edbc29"}, +] + +[package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""] +cover = ["pytest-cov"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +enabler = ["pytest-enabler (>=2.2)"] +test = ["big-O", "importlib-resources ; python_version < \"3.9\"", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"] +type = ["pytest-mypy"] + +[[package]] +name = "zipp" +version = "3.23.0" +description = "Backport of pathlib-compatible object wrapper for zip files" +optional = false +python-versions = ">=3.9" +groups = ["main"] +markers = "python_version >= \"3.10\"" +files = [ + {file = "zipp-3.23.0-py3-none-any.whl", hash = "sha256:071652d6115ed432f5ce1d34c336c0adfd6a884660d1e9712a256d3d3bd4b14e"}, + {file = "zipp-3.23.0.tar.gz", hash = "sha256:a07157588a12518c9d4034df3fbbee09c814741a33ff63c05fa29d26a2404166"}, +] + +[package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""] +cover = ["pytest-cov"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +enabler = ["pytest-enabler (>=2.2)"] +test = ["big-O", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more_itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"] +type = ["pytest-mypy"] + +[metadata] +lock-version = "2.1" +python-versions = "^3.8" +content-hash = "d434777e91680c55c79766d2629eb9a579370eda9f1d017d0bec0d4ed7c3abef" diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..5de05b8 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,101 @@ +[tool.poetry] +name = "modelcache" +version = "0.1.0" +description = "A caching framework for machine learning models" +authors = ["ModelCache Team"] +readme = "README.md" +packages = [ + { include = "modelcache" }, + { include = "modelcache_mm" } +] + +[tool.poetry.dependencies] +python = "^3.8" +cachetools = "5.3.1" +DBUtils = "1.4" +Flask = "3.0.0" +numpy = "1.24.4" +onnxruntime = "1.16.1" +openai = "0.28.1" +pymilvus = "2.3.1" +PyMySQL = "1.1.0" +Requests = "2.31.0" +torch = "2.1.1" +transformers = "4.38.2" +faiss-cpu = "1.7.4" +redis = "5.0.1" +modelscope = "1.14.0" +fastapi = "0.115.5" +uvicorn = "0.32.0" +chromadb = "0.5.23" +elasticsearch = "7.10.0" +snowflake-id = "1.0.2" + +[tool.poetry.group.dev.dependencies] +pytest = "^8.0.0" +pytest-cov = "^5.0.0" +pytest-mock = "^3.14.0" + +[tool.poetry.scripts] +test = "pytest:main" +tests = "pytest:main" + +[tool.pytest.ini_options] +minversion = "8.0" +testpaths = ["tests"] +python_files = ["test_*.py", "*_test.py"] +python_classes = ["Test*"] +python_functions = ["test_*"] +addopts = [ + "-ra", + "--strict-markers", + "--cov=modelcache", + "--cov=modelcache_mm", + "--cov-branch", + "--cov-report=term-missing:skip-covered", + "--cov-report=html", + "--cov-report=xml", + "--cov-fail-under=80", + "-v" +] +markers = [ + "unit: Unit tests", + "integration: Integration tests", + "slow: Slow running tests" +] + +[tool.coverage.run] +source = ["modelcache", "modelcache_mm"] +omit = [ + 
"*/tests/*", + "*/test_*", + "*/__pycache__/*", + "*/site-packages/*", + "*/distutils/*", + "*/venv/*", + "*/.venv/*" +] + +[tool.coverage.report] +precision = 2 +show_missing = true +skip_covered = false +exclude_lines = [ + "pragma: no cover", + "def __repr__", + "if __name__ == .__main__.:", + "raise AssertionError", + "raise NotImplementedError", + "if TYPE_CHECKING:", + "if typing.TYPE_CHECKING:" +] + +[tool.coverage.html] +directory = "htmlcov" + +[tool.coverage.xml] +output = "coverage.xml" + +[build-system] +requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api" \ No newline at end of file diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 0000000..8688da8 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,218 @@ +""" +Shared pytest fixtures and configuration for modelcache tests. +""" +import os +import tempfile +import shutil +from pathlib import Path +from typing import Iterator, Dict, Any +import pytest +from unittest.mock import MagicMock + + +@pytest.fixture +def temp_dir() -> Iterator[Path]: + """ + Create a temporary directory for test files. + + Yields: + Path: Path to the temporary directory + """ + temp_path = Path(tempfile.mkdtemp()) + yield temp_path + # Cleanup after test + if temp_path.exists(): + shutil.rmtree(temp_path) + + +@pytest.fixture +def mock_config() -> Dict[str, Any]: + """ + Provide a mock configuration dictionary for testing. + + Returns: + Dict[str, Any]: Mock configuration + """ + return { + "cache_dir": "/tmp/test_cache", + "max_cache_size": 1000, + "ttl": 3600, + "embedding_model": "test-model", + "similarity_threshold": 0.8, + "vector_dimension": 768, + "batch_size": 32, + "database": { + "type": "memory", + "host": "localhost", + "port": 6379, + "password": None + } + } + + +@pytest.fixture +def mock_embedding(): + """ + Mock embedding object for testing. + + Returns: + MagicMock: Mock embedding with common methods + """ + mock = MagicMock() + mock.embed.return_value = [0.1] * 768 # Default 768-dim embedding + mock.embed_batch.return_value = [[0.1] * 768] * 10 + mock.dimension = 768 + mock.model_name = "test-embedding-model" + return mock + + +@pytest.fixture +def mock_cache_manager(): + """ + Mock cache manager for testing. + + Returns: + MagicMock: Mock cache manager with common methods + """ + mock = MagicMock() + mock.get.return_value = None + mock.set.return_value = True + mock.delete.return_value = True + mock.clear.return_value = True + mock.size.return_value = 0 + return mock + + +@pytest.fixture +def sample_vector_data(): + """ + Sample vector data for testing vector operations. + + Returns: + Dict[str, Any]: Sample vector data + """ + return { + "id": "test_vector_001", + "vector": [0.1, 0.2, 0.3, 0.4, 0.5] * 153 + [0.6, 0.7, 0.8], # 768 dimensions + "metadata": { + "source": "test", + "timestamp": 1234567890, + "model": "test-model" + } + } + + +@pytest.fixture +def mock_redis_client(): + """ + Mock Redis client for testing Redis-based operations. + + Returns: + MagicMock: Mock Redis client + """ + mock = MagicMock() + mock.get.return_value = None + mock.set.return_value = True + mock.delete.return_value = 1 + mock.exists.return_value = 0 + mock.expire.return_value = True + mock.ttl.return_value = -2 + return mock + + +@pytest.fixture +def mock_milvus_client(): + """ + Mock Milvus client for testing vector database operations. 
+ + Returns: + MagicMock: Mock Milvus client + """ + mock = MagicMock() + mock.create_collection.return_value = True + mock.insert.return_value = MagicMock(primary_keys=[1, 2, 3]) + mock.search.return_value = [[]] + mock.query.return_value = [] + mock.delete.return_value = MagicMock(delete_count=1) + return mock + + +@pytest.fixture(autouse=True) +def reset_environment(): + """ + Reset environment variables before each test. + """ + # Store original env vars + original_env = os.environ.copy() + + # Set test environment variables + os.environ["MODELCACHE_ENV"] = "test" + os.environ["MODELCACHE_LOG_LEVEL"] = "DEBUG" + + yield + + # Restore original env vars + os.environ.clear() + os.environ.update(original_env) + + +@pytest.fixture +def sample_text_data(): + """ + Sample text data for testing text processing. + + Returns: + List[str]: List of sample texts + """ + return [ + "This is a test sentence for modelcache.", + "Machine learning models need efficient caching.", + "Vector embeddings help with semantic search.", + "Testing is important for code quality.", + "PyTest makes testing in Python easier." + ] + + +@pytest.fixture +def mock_http_response(): + """ + Mock HTTP response for testing API calls. + + Returns: + MagicMock: Mock response object + """ + mock = MagicMock() + mock.status_code = 200 + mock.json.return_value = {"status": "success", "data": {}} + mock.text = '{"status": "success", "data": {}}' + mock.headers = {"Content-Type": "application/json"} + return mock + + +# Pytest configuration hooks +def pytest_configure(config): + """ + Configure pytest with custom settings. + """ + # Add custom markers description + config.addinivalue_line( + "markers", "unit: mark test as a unit test" + ) + config.addinivalue_line( + "markers", "integration: mark test as an integration test" + ) + config.addinivalue_line( + "markers", "slow: mark test as slow running" + ) + + +def pytest_collection_modifyitems(config, items): + """ + Modify test collection to add markers based on test location. + """ + for item in items: + # Auto-mark tests based on their location + if "unit" in str(item.fspath): + item.add_marker(pytest.mark.unit) + elif "integration" in str(item.fspath): + item.add_marker(pytest.mark.integration) \ No newline at end of file diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/test_setup_validation.py b/tests/test_setup_validation.py new file mode 100644 index 0000000..c5ed40b --- /dev/null +++ b/tests/test_setup_validation.py @@ -0,0 +1,143 @@ +""" +Validation tests to ensure the testing infrastructure is set up correctly. 
+""" +import pytest +import sys +from pathlib import Path + + +class TestSetupValidation: + """Test class to validate the testing infrastructure setup.""" + + def test_pytest_installed(self): + """Verify pytest is installed and importable.""" + import pytest + assert pytest.__version__ + + def test_pytest_cov_installed(self): + """Verify pytest-cov is installed.""" + import pytest_cov + assert pytest_cov + + def test_pytest_mock_installed(self): + """Verify pytest-mock is installed.""" + import pytest_mock + assert pytest_mock + + def test_modelcache_importable(self): + """Verify the main modelcache package can be imported.""" + import modelcache + assert modelcache + + def test_project_structure(self): + """Verify the expected project structure exists.""" + project_root = Path(__file__).parent.parent + + # Check main directories + assert (project_root / "modelcache").exists() + assert (project_root / "modelcache_mm").exists() + assert (project_root / "tests").exists() + assert (project_root / "tests" / "unit").exists() + assert (project_root / "tests" / "integration").exists() + + # Check configuration files + assert (project_root / "pyproject.toml").exists() + + @pytest.mark.unit + def test_unit_marker(self): + """Test that unit marker works correctly.""" + assert True + + @pytest.mark.integration + def test_integration_marker(self): + """Test that integration marker works correctly.""" + assert True + + @pytest.mark.slow + def test_slow_marker(self): + """Test that slow marker works correctly.""" + assert True + + def test_fixtures_available(self, temp_dir, mock_config, mock_embedding): + """Test that custom fixtures are available and working.""" + # Test temp_dir fixture + assert temp_dir.exists() + assert temp_dir.is_dir() + + # Test mock_config fixture + assert isinstance(mock_config, dict) + assert "cache_dir" in mock_config + assert "embedding_model" in mock_config + + # Test mock_embedding fixture + assert hasattr(mock_embedding, "embed") + assert hasattr(mock_embedding, "dimension") + + def test_sample_data_fixtures(self, sample_vector_data, sample_text_data): + """Test that sample data fixtures provide expected data.""" + # Test vector data + assert isinstance(sample_vector_data, dict) + assert "id" in sample_vector_data + assert "vector" in sample_vector_data + assert len(sample_vector_data["vector"]) == 768 + + # Test text data + assert isinstance(sample_text_data, list) + assert len(sample_text_data) > 0 + assert all(isinstance(text, str) for text in sample_text_data) + + def test_mock_fixtures(self, mock_redis_client, mock_milvus_client, mock_cache_manager): + """Test that mock fixtures are properly configured.""" + # Test Redis mock + assert mock_redis_client.get("test") is None + assert mock_redis_client.set("test", "value") is True + + # Test Milvus mock + assert hasattr(mock_milvus_client, "search") + assert hasattr(mock_milvus_client, "insert") + + # Test cache manager mock + assert mock_cache_manager.get("test") is None + assert mock_cache_manager.set("test", "value") is True + + def test_environment_reset(self): + """Test that environment is properly set for testing.""" + import os + assert os.environ.get("MODELCACHE_ENV") == "test" + assert os.environ.get("MODELCACHE_LOG_LEVEL") == "DEBUG" + + def test_coverage_configured(self): + """Test that coverage is properly configured.""" + # This test will be meaningful when running with coverage + # For now, just ensure the test runs + assert True + + +@pytest.mark.unit +class TestUnitTestValidation: + """Validate unit test 
setup.""" + + def test_unit_tests_discoverable(self): + """Ensure unit tests can be discovered and run.""" + assert True + + def test_unit_test_isolation(self, temp_dir): + """Ensure unit tests have proper isolation with temp directories.""" + test_file = temp_dir / "test.txt" + test_file.write_text("test content") + assert test_file.exists() + assert test_file.read_text() == "test content" + + +@pytest.mark.integration +class TestIntegrationTestValidation: + """Validate integration test setup.""" + + def test_integration_tests_discoverable(self): + """Ensure integration tests can be discovered and run.""" + assert True + + def test_integration_mock_available(self, mock_http_response): + """Ensure integration tests have access to HTTP mocks.""" + assert mock_http_response.status_code == 200 + assert mock_http_response.json() == {"status": "success", "data": {}} \ No newline at end of file diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py new file mode 100644 index 0000000..e69de29 From 15da47de3907aee43ee47244c6e16c35457f9aec Mon Sep 17 00:00:00 2001 From: Yuval-Roth Date: Mon, 16 Jun 2025 17:08:57 +0300 Subject: [PATCH 97/98] Optimizations and fixes * Updates to EmbeddingDispatcher: Added catch for exceptions in worker, set the processes to run in high priority * Offloaded some CPU-intensive and blocking code in adapt_insert and adapt_query to a background thread instead of having it run on the main asyncio event-loop * Fixed not inserting into memory cache after memory cache miss. * Fixes in WTINYLFU memory cache class. * Replaced hardcoded similarity threshold in cosine similarity with dynamic value Co-authored-by: olgaoznovich Co-authored-by: Yuval-Roth Co-authored-by: omerdor001 Co-authored-by: adiaybgu --- .gitignore | 2 +- modelcache/adapter/adapter.py | 8 ++-- modelcache/adapter/adapter_insert.py | 4 +- modelcache/adapter/adapter_query.py | 22 +++++---- modelcache/adapter/adapter_register.py | 9 +++- modelcache/adapter/adapter_remove.py | 16 +++++-- modelcache/cache.py | 48 +++++++++---------- modelcache/embedding/base.py | 18 ++++++- modelcache/embedding/embedding_dispatcher.py | 21 +++++--- modelcache/manager/data_manager.py | 21 ++++---- modelcache/manager/eviction/wtinylfu_cache.py | 15 ++++-- modelcache/manager/scalar_data/sql_storage.py | 6 +-- websocket4modelcache.py | 2 +- 13 files changed, 116 insertions(+), 76 deletions(-) diff --git a/.gitignore b/.gitignore index 0e57011..e31cca2 100644 --- a/.gitignore +++ b/.gitignore @@ -93,7 +93,7 @@ celerybeat.pid # Environments .env -.venv +.venv* env/ venv/ ENV/ diff --git a/modelcache/adapter/adapter.py b/modelcache/adapter/adapter.py index d62278d..2425539 100644 --- a/modelcache/adapter/adapter.py +++ b/modelcache/adapter/adapter.py @@ -35,9 +35,9 @@ async def create_insert(cls, *args, **kwargs): return str(e) @classmethod - def create_remove(cls, *args, **kwargs): + async def create_remove(cls, *args, **kwargs): try: - return adapt_remove( + return await adapt_remove( *args, **kwargs ) @@ -46,9 +46,9 @@ def create_remove(cls, *args, **kwargs): return str(e) @classmethod - def create_register(cls, *args, **kwargs): + async def create_register(cls, *args, **kwargs): try: - return adapt_register( + return await adapt_register( *args, **kwargs ) diff --git a/modelcache/adapter/adapter_insert.py b/modelcache/adapter/adapter_insert.py index b4d1f6a..757e0c8 100644 --- a/modelcache/adapter/adapter_insert.py +++ b/modelcache/adapter/adapter_insert.py @@ -16,7 +16,6 @@ async def adapt_insert(*args, **kwargs): 
pre_embedding_data_list = [] embedding_futures_list = [] - # embedding_data_list = [] llm_data_list = [] for row in chat_info: @@ -37,7 +36,8 @@ async def adapt_insert(*args, **kwargs): embedding_data_list = await asyncio.gather(*embedding_futures_list) - chat_cache.data_manager.save( + await asyncio.to_thread( + chat_cache.data_manager.save, pre_embedding_data_list, llm_data_list, embedding_data_list, diff --git a/modelcache/adapter/adapter_query.py b/modelcache/adapter/adapter_query.py index 2eb8fa4..7ce1ba0 100644 --- a/modelcache/adapter/adapter_query.py +++ b/modelcache/adapter/adapter_query.py @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +import asyncio import logging from modelcache.embedding import MetricType from modelcache.utils.time import time_cal @@ -24,17 +25,20 @@ async def adapt_query(cache_data_convert, *args, **kwargs): cache_obj=chat_cache )(pre_embedding_data) - cache_data_list = time_cal( + search_time_cal = time_cal( chat_cache.data_manager.search, func_name="vector_search", report_func=chat_cache.report.search, cache_obj=chat_cache - )( + ) + cache_data_list = await asyncio.to_thread( + search_time_cal, embedding_data, extra_param=context.get("search_func", None), top_k=kwargs.pop("top_k", -1), model=model ) + cache_answers = [] cache_questions = [] cache_ids = [] @@ -43,7 +47,7 @@ async def adapt_query(cache_data_convert, *args, **kwargs): if chat_cache.similarity_metric_type == MetricType.COSINE: cosine_similarity = cache_data_list[0][0] # This code uses the built-in cosine similarity evaluation in milvus - if cosine_similarity < 0.9: + if cosine_similarity < chat_cache.similarity_threshold: return None elif chat_cache.similarity_metric_type == MetricType.L2: ## this is the code that uses L2 for similarity evaluation @@ -87,8 +91,9 @@ async def adapt_query(cache_data_convert, *args, **kwargs): reranker = FlagReranker('BAAI/bge-reranker-v2-m3', use_fp16=False) for cache_data in cache_data_list: primary_id = cache_data[1] - ret = chat_cache.data_manager.get_scalar_data( - cache_data, extra_param=context.get("get_scalar_data", None),model=model + ret = await asyncio.to_thread( + chat_cache.data_manager.get_scalar_data, + cache_data, extra_param=context.get("get_scalar_data", None), model=model ) if ret is None: continue @@ -133,8 +138,9 @@ async def adapt_query(cache_data_convert, *args, **kwargs): # 不使用 reranker 时,走原来的逻辑 for cache_data in cache_data_list: primary_id = cache_data[1] - ret = chat_cache.data_manager.get_scalar_data( - cache_data, extra_param=context.get("get_scalar_data", None),model=model + ret = await asyncio.to_thread( + chat_cache.data_manager.get_scalar_data, + cache_data, extra_param=context.get("get_scalar_data", None), model=model ) if ret is None: continue @@ -204,7 +210,7 @@ async def adapt_query(cache_data_convert, *args, **kwargs): ) # 更新命中次数 try: - chat_cache.data_manager.update_hit_count(return_id) + asyncio.create_task(asyncio.to_thread(chat_cache.data_manager.update_hit_count,return_id)) except Exception: logging.info('update_hit_count except, please check!') diff --git a/modelcache/adapter/adapter_register.py b/modelcache/adapter/adapter_register.py index 811bcec..c4d0317 100644 --- a/modelcache/adapter/adapter_register.py +++ b/modelcache/adapter/adapter_register.py @@ -1,11 +1,16 @@ # -*- coding: utf-8 -*- +import asyncio -def adapt_register(*args, **kwargs): +async def adapt_register(*args, **kwargs): chat_cache = kwargs.pop("cache_obj") model = kwargs.pop("model", None) if model is None or len(model) == 0: return ValueError('') - 
register_resp = chat_cache.data_manager.create_index(model) + register_resp = await asyncio.to_thread( + chat_cache.data_manager.create_index, + model + ) + return register_resp diff --git a/modelcache/adapter/adapter_remove.py b/modelcache/adapter/adapter_remove.py index ca00fc1..aea0284 100644 --- a/modelcache/adapter/adapter_remove.py +++ b/modelcache/adapter/adapter_remove.py @@ -1,8 +1,10 @@ # -*- coding: utf-8 -*- -from modelcache.utils.error import NotInitError, RemoveError +import asyncio +from modelcache.utils.error import RemoveError -def adapt_remove(*args, **kwargs): + +async def adapt_remove(*args, **kwargs): chat_cache = kwargs.pop("cache_obj") model = kwargs.pop("model", None) remove_type = kwargs.pop("remove_type", None) @@ -13,9 +15,15 @@ def adapt_remove(*args, **kwargs): # delete data if remove_type == 'delete_by_id': id_list = kwargs.pop("id_list", []) - resp = chat_cache.data_manager.delete(id_list, model=model) + resp = await asyncio.to_thread( + chat_cache.data_manager.delete, + id_list, model=model + ) elif remove_type == 'truncate_by_model': - resp = chat_cache.data_manager.truncate(model) + resp = await asyncio.to_thread( + chat_cache.data_manager.truncate, + model + ) else: # resp = "remove_type_error" raise RemoveError() diff --git a/modelcache/cache.py b/modelcache/cache.py index 76d98fc..9aaa224 100644 --- a/modelcache/cache.py +++ b/modelcache/cache.py @@ -27,13 +27,6 @@ #==================== Cache class definition =========================# #=====================================================================# -executor = ThreadPoolExecutor(max_workers=2) - -def response_text(cache_resp): - return cache_resp['data'] - -def response_hitquery(cache_resp): - return cache_resp['hitQuery'] # noinspection PyMethodMayBeStatic class Cache: @@ -80,11 +73,16 @@ def close(): modelcache_log.error(e) def save_query_resp(self, query_resp_dict, **kwargs): - self.data_manager.save_query_resp(query_resp_dict, **kwargs) + asyncio.create_task(asyncio.to_thread( + self.data_manager.save_query_resp, + query_resp_dict, **kwargs + )) def save_query_info(self,result, model, query, delta_time_log): - self.data_manager.save_query_resp(result, model=model, query=json.dumps(query, ensure_ascii=False), - delta_time=delta_time_log) + asyncio.create_task(asyncio.to_thread( + self.data_manager.save_query_resp, + result, model=model, query=json.dumps(query, ensure_ascii=False), delta_time=delta_time_log + )) async def handle_request(self, param_dict: dict): # param parsing @@ -103,7 +101,7 @@ async def handle_request(self, param_dict: dict): result = {"errorCode": 102, "errorDesc": "type exception, should one of ['query', 'insert', 'remove', 'register']", "cacheHit": False, "delta_time": 0, "hit_query": '', "answer": ''} - self.data_manager.save_query_resp(result, model=model, query='', delta_time=0) + self.save_query_resp(result, model=model, query='', delta_time=0) return result except Exception as e: return {"errorCode": 103, "errorDesc": str(e), "cacheHit": False, "delta_time": 0, "hit_query": '', @@ -120,14 +118,14 @@ async def handle_request(self, param_dict: dict): elif request_type == 'insert': return await self.handle_insert(chat_info, model) elif request_type == 'remove': - return self.handle_remove(model, param_dict) + return await self.handle_remove(model, param_dict) elif request_type == 'register': - return self.handle_register(model) + return await self.handle_register(model) else: return {"errorCode": 400, "errorDesc": "bad request"} - def handle_register(self, model): - 
response = adapter.ChatCompletion.create_register( + async def handle_register(self, model): + response = await adapter.ChatCompletion.create_register( model=model, cache_obj=self ) @@ -137,10 +135,10 @@ def handle_register(self, model): result = {"errorCode": 502, "errorDesc": "", "response": response, "writeStatus": "exception"} return result - def handle_remove(self, model, param_dict): + async def handle_remove(self, model, param_dict): remove_type = param_dict.get("remove_type") id_list = param_dict.get("id_list", []) - response = adapter.ChatCompletion.create_remove( + response = await adapter.ChatCompletion.create_remove( model=model, remove_type=remove_type, id_list=id_list, @@ -191,12 +189,12 @@ async def handle_query(self, model, query): result = {"errorCode": 201, "errorDesc": response, "cacheHit": False, "delta_time": delta_time, "hit_query": '', "answer": ''} else: - answer = response_text(response) - hit_query = response_hitquery(response) + answer = response['data'] + hit_query = response['hitQuery'] result = {"errorCode": 0, "errorDesc": '', "cacheHit": True, "delta_time": delta_time, "hit_query": hit_query, "answer": answer} delta_time_log = round(time.time() - start_time, 2) - executor.submit(self.save_query_info, result, model, query, delta_time_log) + self.save_query_info(result, model, query, delta_time_log) except Exception as e: result = {"errorCode": 202, "errorDesc": str(e), "cacheHit": False, "delta_time": 0, "hit_query": '', "answer": ''} @@ -265,7 +263,9 @@ async def init( #==================================================# # switching based on embedding_model - if embedding_model == EmbeddingModel.HUGGINGFACE_ALL_MPNET_BASE_V2: + if (embedding_model == EmbeddingModel.HUGGINGFACE_ALL_MPNET_BASE_V2 + or embedding_model == EmbeddingModel.HUGGINGFACE_ALL_MINILM_L6_V2 + or embedding_model == EmbeddingModel.HUGGINGFACE_ALL_MINILM_L12_V2): query_pre_embedding_func = query_with_role insert_pre_embedding_func = query_with_role post_process_messages_func = first @@ -287,8 +287,8 @@ async def init( # add more configurations for other embedding models as needed else: - modelcache_log.error(f"Please add configuration for {embedding_model} in modelcache/__init__.py.") - raise CacheError(f"Please add configuration for {embedding_model} in modelcache/__init__.py.") + modelcache_log.error(f"Please add configuration for {embedding_model} in modelcache/cache.py.") + raise CacheError(f"Please add configuration for {embedding_model} in modelcache/cache.py.") # ====================== Data manager ==============================# @@ -300,7 +300,7 @@ async def init( config=vector_config, metric_type=similarity_metric_type, ), - eviction='ARC', + memory_cache_policy='ARC', max_size=10000, normalize=normalize, ) diff --git a/modelcache/embedding/base.py b/modelcache/embedding/base.py index aafdc97..70ae240 100644 --- a/modelcache/embedding/base.py +++ b/modelcache/embedding/base.py @@ -1,8 +1,12 @@ # -*- coding: utf-8 -*- from abc import abstractmethod, ABCMeta +from modelcache.utils.error import CacheError from modelcache.utils.lazy_import import LazyImport from enum import Enum + +from modelcache.utils.log import modelcache_log + huggingface = LazyImport("huggingface", globals(), "modelcache.embedding.huggingface") data2vec = LazyImport("data2vec", globals(), "modelcache.embedding.data2vec") llmEmb = LazyImport("llmEmb", globals(), "modelcache.embedding.llmEmb") @@ -21,7 +25,7 @@ class EmbeddingModel(Enum): HUGGINGFACE_ALL_MPNET_BASE_V2 = {"dimension":768, 
"model_path":"sentence-transformers/all-mpnet-base-v2"} HUGGINGFACE_ALL_MINILM_L6_V2 = {"dimension":384, "model_path":"sentence-transformers/all-MiniLM-L6-v2"} HUGGINGFACE_ALL_MINILM_L12_V2 = {"dimension":384, "model_path":"sentence-transformers/all-MiniLM-L12-v2"} - DATA2VEC_AUDIO = {"dimension":None, "model_path":"model/text2vec-base-chinese/"} + DATA2VEC_AUDIO = {"dimension":768, "model_path":"model/text2vec-base-chinese/"} LLM_EMB2VEC_AUDIO = {"dimension":None, "model_path":None} FASTTEXT = {"dimension":None, "model_path":None} PADDLE_NLP = {"dimension":None, "model_path":None} @@ -68,6 +72,14 @@ def get(model:EmbeddingModel, **kwargs): model_path = kwargs.pop("model_path","sentence-transformers/all-mpnet-base-v2") return huggingface.Huggingface(model_path) + elif model == EmbeddingModel.HUGGINGFACE_ALL_MINILM_L6_V2: + model_path = kwargs.pop("model_path","sentence-transformers/all-MiniLM-L6-v2") + return huggingface.Huggingface(model_path) + + elif model == EmbeddingModel.HUGGINGFACE_ALL_MINILM_L12_V2: + model_path = kwargs.pop("model_path","sentence-transformers/all-MiniLM-L12-v2") + return huggingface.Huggingface(model_path) + elif model == EmbeddingModel.DATA2VEC_AUDIO: model_path = kwargs.pop("model_path","model/text2vec-base-chinese/") return data2vec.Data2VecAudio(model_path) @@ -99,5 +111,7 @@ def get(model:EmbeddingModel, **kwargs): return bge_m3.BgeM3Embedding(model_path) else: - raise ValueError(f"Unsupported embedding model: {model}") + modelcache_log.error(f"Please add configuration for {model} in modelcache/embedding/base.py.") + raise CacheError(f"Please add configuration for {model} in modelcache/embedding/base.py.") + diff --git a/modelcache/embedding/embedding_dispatcher.py b/modelcache/embedding/embedding_dispatcher.py index 358cff1..56b5e27 100644 --- a/modelcache/embedding/embedding_dispatcher.py +++ b/modelcache/embedding/embedding_dispatcher.py @@ -2,6 +2,7 @@ import threading import uuid import asyncio +import psutil from asyncio import Future, AbstractEventLoop from modelcache.embedding import EmbeddingModel @@ -11,13 +12,18 @@ def worker_func(embedding_model: EmbeddingModel, model_path, task_queue, result_queue, worker_id): base_embedding = BaseEmbedding.get(embedding_model, model_path=model_path) print(f"Embedding worker {worker_id} started.") - while True: - job_id, data = task_queue.get() - try: - result = base_embedding.to_embeddings(data) - except Exception as e: - result = e - result_queue.put((job_id, result)) + try: + while True: + job_id, data = task_queue.get() + try: + result = base_embedding.to_embeddings(data) + except Exception as e: + result = e + result_queue.put((job_id, result)) + except KeyboardInterrupt: + print(f"Embedding worker {worker_id} stopped.") + except Exception as e: + print(f"Embedding worker {worker_id} encountered an error: {e}") class EmbeddingDispatcher: @@ -46,6 +52,7 @@ def __init__( ) p.daemon = True p.start() + psutil.Process(p.pid).nice(psutil.HIGH_PRIORITY_CLASS) self.workers.append(p) def _start_result_collector_thread(self): diff --git a/modelcache/manager/data_manager.py b/modelcache/manager/data_manager.py index c12f414..5a32c01 100644 --- a/modelcache/manager/data_manager.py +++ b/modelcache/manager/data_manager.py @@ -69,7 +69,7 @@ def get( object_base: Union[ObjectBase, str] = None, max_size: int = 3, clean_size: int = 1, - eviction: str = "ARC", + memory_cache_policy: str = "ARC", data_path: str = "data_map.txt", get_data_container: Callable = None, normalize: bool = True @@ -84,7 +84,7 @@ def get( if 
isinstance(object_base, str): object_base = ObjectBase.get(name=object_base) assert cache_base and vector_base - return SSDataManager(cache_base, vector_base, object_base, max_size, clean_size,normalize, eviction) + return SSDataManager(cache_base, vector_base, object_base, max_size, clean_size,normalize, memory_cache_policy) class MapDataManager(DataManager): @@ -229,22 +229,18 @@ def import_data( normalize(embedding_data) for embedding_data in embedding_datas ] - for i, embedding_data in enumerate(embedding_datas): + for embedding_data, answer, question in zip(embedding_datas,answers,questions): if self.o is not None: - ans = self._process_answer_data(answers[i]) - else: - ans = answers[i] + answer = self._process_answer_data(answer) - question = questions[i] embedding_data = embedding_data.astype("float32") - cache_datas.append([ans, question, embedding_data, model]) + cache_datas.append([answer, question, embedding_data, model]) ids = self.s.batch_insert(cache_datas) datas = [] - for i, embedding_data in enumerate(embedding_datas): - _id = ids[i] + for _id,embedding_data,cache_data in zip(ids,embedding_datas,cache_datas): datas.append(VectorData(id=_id, data=embedding_data.astype("float32"))) - self.eviction_base.put([(_id, cache_datas[i])],model=model) + self.eviction_base.put([(_id, cache_data)],model=model) self.v.mul_add(datas,model) def get_scalar_data(self, res_data, **kwargs) -> Optional[CacheData]: @@ -254,9 +250,10 @@ def get_scalar_data(self, res_data, **kwargs) -> Optional[CacheData]: cache_hit = self.eviction_base.get(_id, model=model) if cache_hit is not None: return cache_hit - cache_data = self.s.get_data_by_id(res_data[1]) + cache_data = self.s.get_data_by_id(_id) if cache_data is None: return None + self.eviction_base.put([(_id, cache_data)], model=model) return cache_data def update_hit_count(self, primary_id, **kwargs): diff --git a/modelcache/manager/eviction/wtinylfu_cache.py b/modelcache/manager/eviction/wtinylfu_cache.py index d5887f5..1f41447 100644 --- a/modelcache/manager/eviction/wtinylfu_cache.py +++ b/modelcache/manager/eviction/wtinylfu_cache.py @@ -1,4 +1,4 @@ -from cachetools import LRUCache, Cache +from cachetools import LRUCache, Cache, LFUCache from readerwriterlock import rwlock import random @@ -35,16 +35,21 @@ def decay(self): table[i] >>= 1 class W2TinyLFU(Cache): - def __init__(self, maxsize, window_pct=1): + def __init__(self, maxsize, window_pct=0.01): + """ + param maxsize: Maximum size of the cache. + + param window_pct: Percentage of the cache size to be used for the window. 
+ """ super().__init__(maxsize) - self.window_size = max(1, int(maxsize * window_pct / 100)) + self.window_size = max(1, int(maxsize * window_pct)) rest = maxsize - self.window_size self.probation_size = rest // 2 self.protected_size = rest - self.probation_size self.window = LRUCache(maxsize=self.window_size) - self.probation = LRUCache(maxsize=self.probation_size) - self.protected = LRUCache(maxsize=self.protected_size) + self.probation = LFUCache(maxsize=self.probation_size) + self.protected = LFUCache(maxsize=self.protected_size) self.cms = CountMinSketch() self.data = {} diff --git a/modelcache/manager/scalar_data/sql_storage.py b/modelcache/manager/scalar_data/sql_storage.py index c24a024..5683f61 100644 --- a/modelcache/manager/scalar_data/sql_storage.py +++ b/modelcache/manager/scalar_data/sql_storage.py @@ -1,11 +1,9 @@ # -*- coding: utf-8 -*- -import os -import time import uuid import pymysql import json -import base64 +import numpy as np from typing import List from modelcache.manager.scalar_data.base import CacheStorage, CacheData from DBUtils.PooledDB import PooledDB @@ -147,7 +145,7 @@ def get_data_by_id(self, key: int): conn.close() if resp is not None and len(resp) == 4: - return resp + return resp[0], resp[1], np.frombuffer(resp[2], dtype=np.float32), resp[3] else: return None diff --git a/websocket4modelcache.py b/websocket4modelcache.py index 4543f03..6aaed64 100644 --- a/websocket4modelcache.py +++ b/websocket4modelcache.py @@ -15,7 +15,7 @@ async def lifespan(app: FastAPI): sql_storage="mysql", vector_storage="milvus", embedding_model=EmbeddingModel.HUGGINGFACE_ALL_MPNET_BASE_V2, - embedding_workers_num=6 + embedding_workers_num=8 ) yield From a13ec496f49eefd2fcadba420f933c6f59621093 Mon Sep 17 00:00:00 2001 From: Yuval-Roth Date: Thu, 26 Jun 2025 18:10:16 +0300 Subject: [PATCH 98/98] Added documentation Co-authored-by: olgaoznovich Co-authored-by: Yuval-Roth Co-authored-by: omerdor001 Co-authored-by: adiaybgu --- modelcache/adapter/adapter_insert.py | 18 +++- modelcache/adapter/adapter_query.py | 32 ++++-- modelcache/cache.py | 98 ++++++++++++++++--- modelcache/embedding/embedding_dispatcher.py | 32 +++--- modelcache/manager/data_manager.py | 63 ++++++++++-- modelcache/manager/eviction/arc_cache.py | 47 +++++++-- modelcache/manager/eviction/memory_cache.py | 1 + modelcache/manager/eviction/wtinylfu_cache.py | 64 +++++++++--- modelcache/manager/scalar_data/sql_storage.py | 1 + modelcache/manager/vector_data/base.py | 1 + 10 files changed, 287 insertions(+), 70 deletions(-) diff --git a/modelcache/adapter/adapter_insert.py b/modelcache/adapter/adapter_insert.py index 757e0c8..a2f224a 100644 --- a/modelcache/adapter/adapter_insert.py +++ b/modelcache/adapter/adapter_insert.py @@ -9,23 +9,31 @@ async def adapt_insert(*args, **kwargs): chat_cache = kwargs.pop("cache_obj") model = kwargs.pop("model", None) require_object_store = kwargs.pop("require_object_store", False) + + # Validate object store availability if required if require_object_store: assert chat_cache.data_manager.o, "Object store is required for adapter." 
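# The rest of adapt_insert (continued below) builds one embedding future per chat row
# and awaits them together with asyncio.gather. A minimal sketch of that fan-out/fan-in,
# with fake_embed standing in for chat_cache.embedding_func:
import asyncio

async def fake_embed(text: str):
    await asyncio.sleep(0.01)                     # simulates embedding latency
    return [float(len(text))] * 4                 # dummy 4-dimensional vector

async def embed_rows(rows):
    futures = [fake_embed(row["query"]) for row in rows]   # fan out: one task per row
    return await asyncio.gather(*futures)                  # fan in: wait for all embeddings

rows = [{"query": "hello", "answer": "hi"}, {"query": "bye", "answer": "see you"}]
print(asyncio.run(embed_rows(rows)))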
+ context = kwargs.pop("cache_context", {}) chat_info = kwargs.pop("chat_info", []) - pre_embedding_data_list = [] - embedding_futures_list = [] - llm_data_list = [] + # Initialize collections for parallel processing + pre_embedding_data_list = [] # Preprocessed data ready for embedding + embedding_futures_list = [] # Async embedding generation tasks + llm_data_list = [] # Extracted LLM response data + # Process each chat entry and prepare for parallel embedding generation for row in chat_info: + # Preprocess chat data using configured preprocessing function pre_embedding_data = chat_cache.insert_pre_embedding_func( row, extra_param=context.get("pre_embedding_func", None), prompts=chat_cache.prompts, ) pre_embedding_data_list.append(pre_embedding_data) - llm_data_list.append(row['answer']) + llm_data_list.append(row['answer']) # Extract answer text for storage + + # Create async embedding generation task with performance monitoring embedding_future = time_cal( chat_cache.embedding_func, func_name="embedding", @@ -34,8 +42,10 @@ async def adapt_insert(*args, **kwargs): )(pre_embedding_data) embedding_futures_list.append(embedding_future) + # Wait for all embedding generation tasks to complete in parallel embedding_data_list = await asyncio.gather(*embedding_futures_list) + # Save all processed data to the data manager asynchronously await asyncio.to_thread( chat_cache.data_manager.save, pre_embedding_data_list, diff --git a/modelcache/adapter/adapter_query.py b/modelcache/adapter/adapter_query.py index 7ce1ba0..d98e503 100644 --- a/modelcache/adapter/adapter_query.py +++ b/modelcache/adapter/adapter_query.py @@ -8,16 +8,21 @@ USE_RERANKER = False # 如果为 True 则启用 reranker,否则使用原有逻辑 async def adapt_query(cache_data_convert, *args, **kwargs): + # Extract query parameters chat_cache = kwargs.pop("cache_obj") scope = kwargs.pop("scope") model = scope['model'] context = kwargs.pop("cache_context", {}) cache_factor = kwargs.pop("cache_factor", 1.0) + + # Preprocess query for embedding generation pre_embedding_data = chat_cache.query_pre_embedding_func( kwargs, extra_param=context.get("pre_embedding_func", None), prompts=chat_cache.prompts, ) + + # Generate embedding with performance monitoring embedding_data = await time_cal( chat_cache.embedding_func, func_name="embedding", @@ -39,24 +44,29 @@ async def adapt_query(cache_data_convert, *args, **kwargs): model=model ) + # Initialize result containers cache_answers = [] cache_questions = [] cache_ids = [] cosine_similarity = None + # Similarity evaluation based on metric type if chat_cache.similarity_metric_type == MetricType.COSINE: cosine_similarity = cache_data_list[0][0] # This code uses the built-in cosine similarity evaluation in milvus if cosine_similarity < chat_cache.similarity_threshold: - return None + return None # No suitable match found + elif chat_cache.similarity_metric_type == MetricType.L2: - ## this is the code that uses L2 for similarity evaluation + # this is the code that uses L2 for similarity evaluation similarity_threshold = chat_cache.similarity_threshold similarity_threshold_long = chat_cache.similarity_threshold_long min_rank, max_rank = chat_cache.similarity_evaluation.range() rank_threshold = (max_rank - min_rank) * similarity_threshold * cache_factor rank_threshold_long = (max_rank - min_rank) * similarity_threshold_long * cache_factor + + # Clamp thresholds to valid range rank_threshold = ( max_rank if rank_threshold > max_rank @@ -71,6 +81,8 @@ async def adapt_query(cache_data_convert, *args, **kwargs): if 
rank_threshold_long < min_rank else rank_threshold_long ) + + # Evaluate similarity score if cache_data_list is None or len(cache_data_list) == 0: rank_pre = -1.0 else: @@ -81,12 +93,13 @@ async def adapt_query(cache_data_convert, *args, **kwargs): extra_param=context.get("evaluation_func", None), ) if rank_pre < rank_threshold: - return None + return None # Similarity too low else: raise ValueError( f"Unsupported similarity metric type: {chat_cache.similarity_metric_type}" ) + # Process search results with optional reranking if USE_RERANKER: reranker = FlagReranker('BAAI/bge-reranker-v2-m3', use_fp16=False) for cache_data in cache_data_list: @@ -116,7 +129,6 @@ async def adapt_query(cache_data_convert, *args, **kwargs): "question": pre_embedding_data, "embedding": embedding_data, } - eval_cache_data = { "question": ret[0], "answer": ret[1], @@ -135,9 +147,10 @@ async def adapt_query(cache_data_convert, *args, **kwargs): cache_questions.append((rank, ret[1])) cache_ids.append((rank, primary_id)) else: - # 不使用 reranker 时,走原来的逻辑 + # Original logic without reranking for cache_data in cache_data_list: primary_id = cache_data[1] + # Retrieve full cache entry data ret = await asyncio.to_thread( chat_cache.data_manager.get_scalar_data, cache_data, extra_param=context.get("get_scalar_data", None), model=model @@ -150,6 +163,7 @@ async def adapt_query(cache_data_convert, *args, **kwargs): cache_answers.append((cosine_similarity, ret[0])) cache_questions.append((cosine_similarity, ret[1])) cache_ids.append((cosine_similarity, primary_id)) + elif chat_cache.similarity_metric_type == MetricType.L2: if "deps" in context and hasattr(ret.question, "deps"): eval_query_data = { @@ -167,13 +181,14 @@ async def adapt_query(cache_data_convert, *args, **kwargs): "question": pre_embedding_data, "embedding": embedding_data, } - eval_cache_data = { "question": ret[0], "answer": ret[1], "search_result": cache_data, "embedding": None } + + # Evaluate similarity for this specific result rank = chat_cache.similarity_evaluation.evaluation( eval_query_data, eval_cache_data, @@ -195,6 +210,7 @@ async def adapt_query(cache_data_convert, *args, **kwargs): f"Unsupported similarity metric type: {chat_cache.similarity_metric_type}" ) + # Sort results by similarity score (highest first) cache_answers = sorted(cache_answers, key=lambda x: x[0], reverse=True) cache_questions = sorted(cache_questions, key=lambda x: x[0], reverse=True) cache_ids = sorted(cache_ids, key=lambda x: x[0], reverse=True) @@ -208,12 +224,14 @@ async def adapt_query(cache_data_convert, *args, **kwargs): return_id = chat_cache.post_process_messages_func( [t[1] for t in cache_ids] ) - # 更新命中次数 + + # Update hit count for analytics (async to avoid blocking) try: asyncio.create_task(asyncio.to_thread(chat_cache.data_manager.update_hit_count,return_id)) except Exception: logging.info('update_hit_count except, please check!') + # Record cache hit for reporting chat_cache.report.hint_cache() return cache_data_convert(return_message, return_query) return None \ No newline at end of file diff --git a/modelcache/cache.py b/modelcache/cache.py index 9aaa224..9149a3c 100644 --- a/modelcache/cache.py +++ b/modelcache/cache.py @@ -73,30 +73,56 @@ def close(): modelcache_log.error(e) def save_query_resp(self, query_resp_dict, **kwargs): + """ + Save query response asynchronously to avoid blocking main thread. + Used for logging and analytics purposes. 
+ """ + # Execute save operation in a separate thread to maintain async performance asyncio.create_task(asyncio.to_thread( self.data_manager.save_query_resp, query_resp_dict, **kwargs )) - def save_query_info(self,result, model, query, delta_time_log): + def save_query_info(self, result, model, query, delta_time_log): + """ + Save query information with execution timing for performance analysis. + Serializes query data to JSON for storage. + """ + # Convert query to JSON and save asynchronously asyncio.create_task(asyncio.to_thread( self.data_manager.save_query_resp, result, model=model, query=json.dumps(query, ensure_ascii=False), delta_time=delta_time_log )) async def handle_request(self, param_dict: dict): - # param parsing + """ + Main entry point for processing cache requests. + + Routes requests to appropriate handlers based on request type. + Supports: query, insert, remove, register operations. + + Args: + param_dict: Request parameters containing type, scope, query, etc. + + Returns: + dict: Response dictionary with errorCode, result data, and metadata + """ + # Parse and validate request parameters try: request_type = param_dict.get("type") + # Extract model information from scope scope = param_dict.get("scope") model = None if scope is not None: model = scope.get('model') + # Normalize model name for consistent storage (replace special chars) model = model.replace('-', '_') model = model.replace('.', '_') query = param_dict.get("query") chat_info = param_dict.get("chat_info") + + # Validate request type against supported operations if request_type is None or request_type not in ['query', 'insert', 'remove', 'register']: result = {"errorCode": 102, "errorDesc": "type exception, should one of ['query', 'insert', 'remove', 'register']", @@ -104,15 +130,16 @@ async def handle_request(self, param_dict: dict): self.save_query_resp(result, model=model, query='', delta_time=0) return result except Exception as e: + # Return error response for parameter parsing failures return {"errorCode": 103, "errorDesc": str(e), "cacheHit": False, "delta_time": 0, "hit_query": '', "answer": ''} - # model filter + # Apply model-based filtering (blacklist check) filter_resp = model_blacklist_filter(model, request_type) if isinstance(filter_resp, dict): return filter_resp - # handle request + # Route to appropriate handler based on request type if request_type == 'query': return await self.handle_query(model, query) elif request_type == 'insert': @@ -129,6 +156,7 @@ async def handle_register(self, model): model=model, cache_obj=self ) + # Process registration response and return standardized result if response in ['create_success', 'already_exists']: result = {"errorCode": 0, "errorDesc": "", "response": response, "writeStatus": "success"} else: @@ -138,12 +166,16 @@ async def handle_register(self, model): async def handle_remove(self, model, param_dict): remove_type = param_dict.get("remove_type") id_list = param_dict.get("id_list", []) + + # Execute removal operation through adapter response = await adapter.ChatCompletion.create_remove( model=model, remove_type=remove_type, id_list=id_list, cache_obj=self ) + + # Process removal response and standardize result format if not isinstance(response, dict): return {"errorCode": 401, "errorDesc": "", "response": response, "removeStatus": "exception"} state = response.get('status') @@ -156,6 +188,7 @@ async def handle_remove(self, model, param_dict): async def handle_insert(self, chat_info, model): try: try: + # Execute insertion through adapter with error 
handling response = await adapter.ChatCompletion.create_insert( model=model, chat_info=chat_info, @@ -164,6 +197,7 @@ async def handle_insert(self, chat_info, model): except Exception as e: return {"errorCode": 302, "errorDesc": str(e), "writeStatus": "exception"} + # Process insertion response if response == 'success': result = {"errorCode": 0, "errorDesc": "", "writeStatus": "success"} else: @@ -174,25 +208,35 @@ async def handle_insert(self, chat_info, model): async def handle_query(self, model, query): try: - start_time = time.time() + start_time = time.time() # Start performance timer + + # Execute query through adapter system response = await adapter.ChatCompletion.create_query( scope={"model": model}, query=query, cache_obj=self ) + + # Calculate query execution time delta_time = '{}s'.format(round(time.time() - start_time, 2)) + + # Process different response types if response is None: + # No cache hit found result = {"errorCode": 0, "errorDesc": '', "cacheHit": False, "delta_time": delta_time, "hit_query": '', "answer": ''} - # elif response in ['adapt_query_exception']: elif isinstance(response, str): + # Error occurred during query processing result = {"errorCode": 201, "errorDesc": response, "cacheHit": False, "delta_time": delta_time, "hit_query": '', "answer": ''} else: + # Cache hit found - extract response data answer = response['data'] hit_query = response['hitQuery'] result = {"errorCode": 0, "errorDesc": '', "cacheHit": True, "delta_time": delta_time, "hit_query": hit_query, "answer": answer} + + # Log query performance data asynchronously delta_time_log = round(time.time() - start_time, 2) self.save_query_info(result, model, query, delta_time_log) except Exception as e: @@ -202,6 +246,7 @@ async def handle_query(self, model, query): return result def flush(self): + """Flush all cached data to persistent storage backends.""" self.data_manager.flush() @staticmethod @@ -211,11 +256,35 @@ async def init( embedding_model: EmbeddingModel, embedding_workers_num: int ) -> tuple['Cache' , AbstractEventLoop]: - #================= configurations for databases ===================# + """ + Initialize a complete Cache system with all required components. 
+ + Args: + sql_storage: SQL backend type ("mysql", "sqlite", "elasticsearch") + vector_storage: Vector backend type ("milvus", "faiss", "chromadb", "redis") + embedding_model: Embedding model enum value + embedding_workers_num: Number of parallel embedding worker processes + + Returns: + tuple: (Cache instance, event loop) ready for async operations + + Raises: + CacheError: If unsupported storage backends or model configuration issues + + Example: + cache, loop = await Cache.init( + sql_storage="mysql", + vector_storage="milvus", + embedding_model=EmbeddingModel.HUGGINGFACE_ALL_MPNET_BASE_V2, + embedding_workers_num=4 + ) + """ + #================= Database configuration loading ===================# sql_config = configparser.ConfigParser() vector_config = configparser.ConfigParser() + # Load SQL storage configuration from INI files if sql_storage == "mysql": sql_config.read('modelcache/config/mysql_config.ini') elif sql_storage == "elasticsearch": @@ -226,6 +295,7 @@ async def init( modelcache_log.error(f"Unsupported cache storage: {sql_storage}.") raise CacheError(f"Unsupported cache storage: {sql_storage}.") + # Load vector storage configuration from INI files if vector_storage == "milvus" : vector_config.read('modelcache/config/milvus_config.ini') elif vector_storage == "chromadb" : @@ -233,22 +303,24 @@ async def init( elif vector_storage == "redis" : vector_config.read('modelcache/config/redis_config.ini') elif vector_storage == "faiss" : - vector_config = None # faiss does not require additional configuration + vector_config = None # FAISS does not require external configuration else: modelcache_log.error(f"Unsupported vector storage: {vector_storage}.") raise CacheError(f"Unsupported vector storage: {vector_storage}.") - #=============== model-specific configuration =====================# + #=============== embedding-model-specific configuration =====================# - event_loop = asyncio.get_running_loop() + event_loop = asyncio.get_running_loop() # Get current async event loop model_path = embedding_model.value['model_path'] dimension = embedding_model.value['dimension'] + # Validate that embedding model has required configuration if model_path is None or dimension is None: modelcache_log.error(f"Please set the model_path and dimension for {embedding_model} in modelcache/embedding/base.py.") raise CacheError(f"Please set the model_path and dimension for {embedding_model} in modelcache/embedding/base.py.") + # Initialize parallel embedding generation system embedding_dispatcher = EmbeddingDispatcher(embedding_model, model_path, event_loop, embedding_workers_num) #=== These will be used to initialize the cache ===# @@ -262,7 +334,7 @@ async def init( normalize: bool = None #==================================================# - # switching based on embedding_model + # Configure cache behavior based on embedding model type if (embedding_model == EmbeddingModel.HUGGINGFACE_ALL_MPNET_BASE_V2 or embedding_model == EmbeddingModel.HUGGINGFACE_ALL_MINILM_L6_V2 or embedding_model == EmbeddingModel.HUGGINGFACE_ALL_MINILM_L12_V2): @@ -285,13 +357,14 @@ async def init( similarity_threshold_long = 0.95 normalize = True - # add more configurations for other embedding models as needed + # Add configurations for additional embedding models as needed else: modelcache_log.error(f"Please add configuration for {embedding_model} in modelcache/cache.py.") raise CacheError(f"Please add configuration for {embedding_model} in modelcache/cache.py.") # ====================== Data manager 
==============================# + # Create coordinated data manager with all storage backends data_manager = DataManager.get( SQLStorage.get(sql_storage, config=sql_config), VectorStorage.get( @@ -307,6 +380,7 @@ async def init( #================== Cache Initialization ====================# + # Create fully configured Cache instance cache = Cache( embedding_model = embedding_model, similarity_metric_type = similarity_metric_type, diff --git a/modelcache/embedding/embedding_dispatcher.py b/modelcache/embedding/embedding_dispatcher.py index 56b5e27..16f4beb 100644 --- a/modelcache/embedding/embedding_dispatcher.py +++ b/modelcache/embedding/embedding_dispatcher.py @@ -10,16 +10,17 @@ def worker_func(embedding_model: EmbeddingModel, model_path, task_queue, result_queue, worker_id): + """Worker function that runs in separate processes to generate embeddings.""" base_embedding = BaseEmbedding.get(embedding_model, model_path=model_path) print(f"Embedding worker {worker_id} started.") try: while True: - job_id, data = task_queue.get() + job_id, data = task_queue.get() # Get task from queue try: - result = base_embedding.to_embeddings(data) + result = base_embedding.to_embeddings(data) # Generate embedding except Exception as e: result = e - result_queue.put((job_id, result)) + result_queue.put((job_id, result)) # Send result back except KeyboardInterrupt: print(f"Embedding worker {worker_id} stopped.") except Exception as e: @@ -27,6 +28,8 @@ def worker_func(embedding_model: EmbeddingModel, model_path, task_queue, result_ class EmbeddingDispatcher: + """Manages a pool of worker processes for parallel embedding generation.""" + def __init__( self, embedding_model: EmbeddingModel, @@ -34,14 +37,15 @@ def __init__( event_loop: AbstractEventLoop, num_workers: int ): + """Initialize the dispatcher with worker processes.""" if num_workers <= 0: raise ValueError("Number of workers must be greater than 0.") - self.task_queue = multiprocessing.Queue() - self.result_queue = multiprocessing.Queue() - self.futures: dict[str, asyncio.Future] = {} + self.task_queue = multiprocessing.Queue() # Tasks to workers + self.result_queue = multiprocessing.Queue() # Results from workers + self.futures: dict[str, asyncio.Future] = {} # Pending futures self.event_loop = event_loop - self._start_result_collector_thread() + self._start_result_collector_thread() # Start result collection thread # Start worker processes self.workers = [] @@ -56,10 +60,11 @@ def __init__( self.workers.append(p) def _start_result_collector_thread(self): + """Start a thread to collect results from worker processes.""" def collect(): while True: - job_id, result = self.result_queue.get() - future = self.futures.pop(job_id, None) + job_id, result = self.result_queue.get() # Get result from queue + future = self.futures.pop(job_id, None) # Retrieve future if future: self.event_loop.call_soon_threadsafe( future.set_exception if isinstance(result, Exception) else future.set_result, @@ -70,9 +75,10 @@ def collect(): t.start() def embed(self, data: str) -> Future: - job_id = str(uuid.uuid4()) - future = asyncio.get_running_loop().create_future() - self.futures[job_id] = future - self.task_queue.put((job_id, data)) + """Submit a task for embedding generation.""" + job_id = str(uuid.uuid4()) # Generate unique job ID + future = asyncio.get_running_loop().create_future() # Create future + self.futures[job_id] = future # Store future + self.task_queue.put((job_id, data)) # Add task to queue return future diff --git a/modelcache/manager/data_manager.py 
b/modelcache/manager/data_manager.py index 5a32c01..7f019cd 100644 --- a/modelcache/manager/data_manager.py +++ b/modelcache/manager/data_manager.py @@ -173,23 +173,24 @@ def __init__( ): self.max_size = max_size self.clean_size = clean_size - self.s = s - self.v = v - self.o = o + self.s = s # SQL storage + self.v = v # Vector storage + self.o = o # Object storage (optional) self.normalize = normalize - # added + # Initialize memory cache with specified eviction policy self.eviction_base = MemoryCacheEviction( policy=policy, maxsize=max_size, clean_size=clean_size) def save(self, questions: List[any], answers: List[any], embedding_datas: List[any], **kwargs): + """Save multiple questions, answers, and embeddings to storage.""" model = kwargs.pop("model", None) self.import_data(questions, answers, embedding_datas, model) - def save_query_resp(self, query_resp_dict, **kwargs): + """Save query response log to SQL storage for analytics.""" save_query_start_time = time.time() self.s.insert_query_resp(query_resp_dict, **kwargs) save_query_delta_time = '{}s'.format(round(time.time() - save_query_start_time, 2)) @@ -220,10 +221,17 @@ def _process_question_data(self, question: Union[str, Question]): def import_data( self, questions: List[Any], answers: List[Answer], embedding_datas: List[Any], model: Any ): + """ + Add multiple cache entries into all storage backends. + + Coordinates data insertion across SQL, vector, and object storage, + with memory cache population and optional vector normalization. + """ if len(questions) != len(answers) or len(questions) != len(embedding_datas): raise ParamError("Make sure that all parameters have the same length") cache_datas = [] + # Normalize embedding vectors if configured if self.normalize: embedding_datas = [ normalize(embedding_data) for embedding_data in embedding_datas @@ -236,7 +244,10 @@ def import_data( embedding_data = embedding_data.astype("float32") cache_datas.append([answer, question, embedding_data, model]) + # Insert into SQL storage and get generated IDs ids = self.s.batch_insert(cache_datas) + + # Prepare vector data and populate memory cache datas = [] for _id,embedding_data,cache_data in zip(ids,embedding_datas,cache_datas): datas.append(VectorData(id=_id, data=embedding_data.astype("float32"))) @@ -244,9 +255,15 @@ def import_data( self.v.mul_add(datas,model) def get_scalar_data(self, res_data, **kwargs) -> Optional[CacheData]: + """ + Retrieve scalar data with multi-level caching strategy. + + First checks memory cache, then falls back to SQL storage. + """ model = kwargs.pop("model") - #Get Data from RAM Cache _id = res_data[1] + + # Try to get from memory cache first (fastest) cache_hit = self.eviction_base.get(_id, model=model) if cache_hit is not None: return cache_hit @@ -257,12 +274,19 @@ def get_scalar_data(self, res_data, **kwargs) -> Optional[CacheData]: return cache_data def update_hit_count(self, primary_id, **kwargs): + """Update hit count statistics in SQL storage.""" self.s.update_hit_count_by_id(primary_id) def hit_cache_callback(self, res_data, **kwargs): + """Callback executed on cache hit to update memory cache.""" self.eviction_base.get(res_data[1]) def search(self, embedding_data, **kwargs): + """ + Search for similar vectors in vector storage. + + Applies normalization if configured and delegates to vector backend. 
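# Both import_data and search in this file run embeddings through normalize() when
# self.normalize is set. The normalize helper itself is not part of this patch;
# assuming it is a plain L2 normalization, it behaves like this:
import numpy as np

def l2_normalize(vec: np.ndarray) -> np.ndarray:
    norm = np.linalg.norm(vec)
    return vec if norm == 0 else vec / norm

v = np.array([3.0, 4.0], dtype=np.float32)
print(l2_normalize(v))                            # [0.6 0.8]
print(np.linalg.norm(l2_normalize(v)))            # 1.0, so inner product equals cosine similarity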
+ """ model = kwargs.pop("model", None) if self.normalize: embedding_data = normalize(embedding_data) @@ -270,15 +294,24 @@ def search(self, embedding_data, **kwargs): return self.v.search(data=embedding_data, top_k=top_k, model=model) def delete(self, id_list, **kwargs): + """ + Delete cache entries from all storage backends. + + Removes from memory cache, vector storage, and marks as deleted in SQL. + Returns detailed status of deletion operations. + """ model = kwargs.pop("model") try: + # Remove from memory cache for id in id_list: - self.eviction_base.get_cache(model).pop(id, None) # Remove from in-memory LRU too + self.eviction_base.get_cache(model).pop(id, None) + # Delete from vector storage v_delete_count = self.v.delete(ids=id_list, model=model) except Exception as e: return {'status': 'failed', 'milvus': 'delete milvus data failed, please check! e: {}'.format(e), 'mysql': 'unexecuted'} try: + # Mark as deleted in SQL storage s_delete_count = self.s.mark_deleted(id_list) except Exception as e: return {'status': 'failed', 'milvus': 'success', @@ -288,13 +321,20 @@ def delete(self, id_list, **kwargs): 'mysql': 'delete_count: '+str(s_delete_count)} def create_index(self, model, **kwargs): + """Create vector index for a specific model.""" return self.v.create(model) def truncate(self, model): - # drop memory cache data + """ + Truncate all data for a specific model across all storage backends. + + Clears memory cache, rebuilds vector storage, and deletes SQL data. + Returns detailed status of truncation operations. + """ + # Clear memory cache data self.eviction_base.clear(model) - # drop vector base data + # Rebuild vector storage (drops and recreates collection) try: vector_resp = self.v.rebuild_col(model) except Exception as e: @@ -302,7 +342,8 @@ def truncate(self, model): 'ScalarDB': 'unexecuted'} if vector_resp: return {'status': 'failed', 'VectorDB': vector_resp, 'ScalarDB': 'unexecuted'} - # drop scalar base data + + # Delete scalar data from SQL storage try: delete_count = self.s.model_deleted(model) except Exception as e: @@ -311,10 +352,12 @@ def truncate(self, model): return {'status': 'success', 'VectorDB': 'rebuild', 'ScalarDB': 'delete_count: ' + str(delete_count)} def flush(self): + """Flush all storage backends to ensure data persistence.""" self.s.flush() self.v.flush() def close(self): + """Close all storage connections and release resources.""" self.s.close() self.v.close() diff --git a/modelcache/manager/eviction/arc_cache.py b/modelcache/manager/eviction/arc_cache.py index ae3a260..4f97231 100644 --- a/modelcache/manager/eviction/arc_cache.py +++ b/modelcache/manager/eviction/arc_cache.py @@ -5,22 +5,34 @@ _sentinel = object() class ARC(Cache): + """ + Adaptive Replacement Cache (ARC) implementation. + + ARC maintains four lists (T1, T2, B1, B2) to adaptively balance + between LRU and LFU eviction strategies based on access patterns. 
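# A small usage sketch of the ARC cache documented above, assuming the repository
# (with its cachetools and readerwriterlock dependencies) is installed so the module
# path from the diff header is importable: the first read of a key promotes it from
# the recency list t1 to the frequency list t2.
from modelcache.manager.eviction.arc_cache import ARC

cache = ARC(maxsize=4)
cache["q1"] = "cached answer"
print("q1" in cache.t1)     # True: new entries land in the recency list
_ = cache["q1"]             # first hit promotes the entry
print("q1" in cache.t2)     # True: now tracked as a frequent item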
+ """ + def __init__(self, maxsize, getsizeof=None): + """Initialize ARC cache with maximum size.""" super().__init__(maxsize, getsizeof) - self.t1 = OrderedDict() - self.t2 = OrderedDict() - self.b1 = OrderedDict() - self.b2 = OrderedDict() - self.p = 0 - self._rw_lock = rwlock.RWLockWrite() + self.t1 = OrderedDict() # Recent items + self.t2 = OrderedDict() # Frequent items + self.b1 = OrderedDict() # Ghost entries for T1 + self.b2 = OrderedDict() # Ghost entries for T2 + self.p = 0 # Adaptive parameter + self._rw_lock = rwlock.RWLockWrite() # Thread safety def __len__(self): + """Return total number of cached items.""" return len(self.t1) + len(self.t2) def __contains__(self, key): + """Check if key exists in cache.""" return key in self.t1 or key in self.t2 def _evict_internal(self): + """Internal method to evict items when cache is full.""" + # Evict from cache lists to ghost lists while len(self.t1) + len(self.t2) > self.maxsize: if len(self.t1) > self.p or (len(self.t1) == 0 and len(self.t2) > 0): key, value = self.t1.popitem(last=False) @@ -28,46 +40,56 @@ def _evict_internal(self): else: key, value = self.t2.popitem(last=False) self.b2[key] = value + + # Maintain ghost list sizes while len(self.b1) > (self.maxsize - self.p): self.b1.popitem(last=False) while len(self.b2) > self.p: self.b2.popitem(last=False) def __setitem__(self, key, value): + """Insert or update a cache entry.""" with self._rw_lock.gen_wlock(): + # Remove key from all lists first for l in (self.t1, self.t2, self.b1, self.b2): l.pop(key, None) + # Add to recent list (T1) self.t1[key] = value self.t1.move_to_end(key) self._evict_internal() def __getitem__(self, key): + """Retrieve a cache entry and update access pattern.""" with self._rw_lock.gen_wlock(): if key in self.t1: + # Move from recent to frequent list value = self.t1.pop(key) self.t2[key] = value self.t2.move_to_end(key) - self.p = max(0, self.p - 1) + self.p = max(0, self.p - 1) # Adjust adaptive parameter self._evict_internal() return value if key in self.t2: + # Access frequent list value = self.t2.pop(key) self.t2[key] = value self.t2.move_to_end(key) - self.p = min(self.maxsize, self.p + 1) + self.p = min(self.maxsize, self.p + 1) # Adjust adaptive parameter self._evict_internal() return value if key in self.b1: + # Promote from ghost list B1 to frequent list T2 self.b1.pop(key) - self.p = min(self.maxsize, self.p + 1) + self.p = min(self.maxsize, self.p + 1) # Adjust adaptive parameter self._evict_internal() value = super().__missing__(key) self.t2[key] = value self.t2.move_to_end(key) return value if key in self.b2: + # Promote from ghost list B2 to frequent list T2 self.b2.pop(key) - self.p = max(0, self.p - 1) + self.p = max(0, self.p - 1) # Adjust adaptive parameter self._evict_internal() value = super().__missing__(key) self.t2[key] = value @@ -76,9 +98,11 @@ def __getitem__(self, key): return super().__getitem__(key) def __missing__(self, key): + """Handle missing keys.""" raise KeyError(key) def pop(self, key, default=_sentinel): + """Remove a cache entry.""" with self._rw_lock.gen_wlock(): for l in (self.t1, self.t2, self.b1, self.b2): if key in l: @@ -88,6 +112,7 @@ def pop(self, key, default=_sentinel): return default def clear(self): + """Clear all cache entries.""" with self._rw_lock.gen_wlock(): self.t1.clear() self.t2.clear() @@ -97,10 +122,12 @@ def clear(self): super().clear() def __iter__(self): + """Iterate over cache keys.""" yield from self.t1 yield from self.t2 def __repr__(self): + """Return string representation of the 
cache.""" return (f"ARC(maxsize={self.maxsize}, p={self.p}, len={len(self)}, " f"t1_len={len(self.t1)}, t2_len={len(self.t2)}, " f"b1_len={len(self.b1)}, b2_len={len(self.b2)})") diff --git a/modelcache/manager/eviction/memory_cache.py b/modelcache/manager/eviction/memory_cache.py index 347abf9..838e8cd 100644 --- a/modelcache/manager/eviction/memory_cache.py +++ b/modelcache/manager/eviction/memory_cache.py @@ -67,3 +67,4 @@ def get_cache(self, model: str): @property def policy(self) -> str: return self._policy + diff --git a/modelcache/manager/eviction/wtinylfu_cache.py b/modelcache/manager/eviction/wtinylfu_cache.py index 1f41447..378b1ed 100644 --- a/modelcache/manager/eviction/wtinylfu_cache.py +++ b/modelcache/manager/eviction/wtinylfu_cache.py @@ -4,42 +4,60 @@ class CountMinSketch: def __init__(self, width=1024, depth=4, decay_interval=10000): + """Initialize Count-Min Sketch with specified dimensions.""" self.width = width self.depth = depth - self.tables = [[0]*width for _ in range(depth)] - self.seeds = [random.randrange(1<<30) for _ in range(depth)] - self.ops = 0 + self.tables = [[0]*width for _ in range(depth)] # Hash tables + self.seeds = [random.randrange(1<<30) for _ in range(depth)] # Hash seeds + self.ops = 0 # Operation counter for decay trigger self.decay_interval = decay_interval def _hash(self, x, seed): + """Hash function for mapping items to table positions.""" return hash((x, seed)) % self.width def add(self, x): + """Add an item and increment its frequency estimate.""" self.ops += 1 - est = self.estimate(x) + est = self.estimate(x) # Get current estimate + # Update all hash tables for i, seed in enumerate(self.seeds): idx = self._hash(x, seed) if self.tables[i][idx] <= est: self.tables[i][idx] += 1 + + # Periodic decay to handle changing patterns if self.ops >= self.decay_interval: self.decay() self.ops = 0 def estimate(self, x): + """Estimate frequency of an item (minimum across all tables).""" return min(self.tables[i][self._hash(x, seed)] for i, seed in enumerate(self.seeds)) def decay(self): + """Decay all frequency counts by half.""" for table in self.tables: for i in range(len(table)): - table[i] >>= 1 + table[i] >>= 1 # Right shift (divide by 2) class W2TinyLFU(Cache): + """ + Window Tiny LFU cache implementation. + + Combines a small LRU window cache with a main cache divided into + probation and protected segments, using frequency estimation for + admission control. + """ + def __init__(self, maxsize, window_pct=0.01): """ - param maxsize: Maximum size of the cache. + Initialize W-TinyLFU cache. - param window_pct: Percentage of the cache size to be used for the window. 
+ Args: + maxsize: Maximum size of the cache + window_pct: Percentage of cache size for the window (default 1%) """ super().__init__(maxsize) self.window_size = max(1, int(maxsize * window_pct)) @@ -47,29 +65,34 @@ def __init__(self, maxsize, window_pct=0.01): self.probation_size = rest // 2 self.protected_size = rest - self.probation_size - self.window = LRUCache(maxsize=self.window_size) - self.probation = LFUCache(maxsize=self.probation_size) - self.protected = LFUCache(maxsize=self.protected_size) + # Three cache segments + self.window = LRUCache(maxsize=self.window_size) # Recent items + self.probation = LFUCache(maxsize=self.probation_size) # New main cache items + self.protected = LFUCache(maxsize=self.protected_size) # Frequently accessed items - self.cms = CountMinSketch() - self.data = {} - self._rw_lock = rwlock.RWLockWrite() + self.cms = CountMinSketch() # Frequency estimator + self.data = {} # Cache data storage + self._rw_lock = rwlock.RWLockWrite() # Read-write lock for thread safety def __setitem__(self, key, value): + """Add or update an item in the cache.""" with self._rw_lock.gen_wlock(): self.data[key] = value self._put(key) def __getitem__(self, key): + """Retrieve an item from the cache.""" val = self.get(key, default=None) if val is None: raise KeyError(key) return val def __contains__(self, key): + """Check if an item exists in the cache.""" return key in self.window or key in self.probation or key in self.protected def __delitem__(self, key): + """Remove an item from the cache.""" with self._rw_lock.gen_wlock(): self.data.pop(key, None) self.window.pop(key, None) @@ -77,6 +100,10 @@ def __delitem__(self, key): self.protected.pop(key, None) def get(self, key, default=None): + """ + Retrieve an item from the cache, updating its position + in the cache hierarchy if necessary. + """ if key in self.window: self.window[key] = True return self.data.get(key, default) @@ -94,6 +121,10 @@ def get(self, key, default=None): return default def _put(self, key): + """ + Add an item to the cache, using frequency-based admission + control and eviction policies. + """ self.cms.add(key) if key in self: return @@ -113,6 +144,9 @@ def _put(self, key): self.data.pop(key, None) def _admit_to_main(self, key): + """ + Admit an item to the main cache (probation or protected segment). 
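# A quick check of the segment sizing implied by the constructor above: with the
# default window_pct of 0.01 and the max_size of 10000 configured in cache.py, the
# admission window holds 1% of entries and the main region is split evenly between
# the probation and protected segments.
maxsize = 10000
window_pct = 0.01
window_size = max(1, int(maxsize * window_pct))   # 100
rest = maxsize - window_size                      # 9900
probation_size = rest // 2                        # 4950
protected_size = rest - probation_size            # 4950
print(window_size, probation_size, protected_size)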
+ """ if key in self.protected or key in self.probation: return if self.probation_size == 0: @@ -129,8 +163,10 @@ def _admit_to_main(self, key): self.data.pop(key, None) def clear(self): + """Clear all items from the cache.""" with self._rw_lock.gen_wlock(): self.window.clear() self.probation.clear() self.protected.clear() - self.data.clear() \ No newline at end of file + self.data.clear() + diff --git a/modelcache/manager/scalar_data/sql_storage.py b/modelcache/manager/scalar_data/sql_storage.py index 5683f61..20eba9b 100644 --- a/modelcache/manager/scalar_data/sql_storage.py +++ b/modelcache/manager/scalar_data/sql_storage.py @@ -145,6 +145,7 @@ def get_data_by_id(self, key: int): conn.close() if resp is not None and len(resp) == 4: + # parse the numpy array from bytes and return the data return resp[0], resp[1], np.frombuffer(resp[2], dtype=np.float32), resp[3] else: return None diff --git a/modelcache/manager/vector_data/base.py b/modelcache/manager/vector_data/base.py index 6742c94..689f833 100644 --- a/modelcache/manager/vector_data/base.py +++ b/modelcache/manager/vector_data/base.py @@ -150,3 +150,4 @@ def check_dimension(dimension): if dimension <= 0: raise ParamError(f"the dimension should be greater than zero, current value: {dimension}.") +
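The sql_storage.py change above stores each embedding as raw float32 bytes and, with this patch, parses it back via np.frombuffer when a row is fetched. A minimal standalone sketch of that round trip (the 768-dimensional size matches the all-mpnet-base-v2 entry in the embedding enum; this is not the project's storage code):

import numpy as np

embedding = np.random.rand(768).astype(np.float32)   # e.g. an all-mpnet-base-v2 vector
blob = embedding.tobytes()                           # bytes written to the BLOB column
restored = np.frombuffer(blob, dtype=np.float32)     # what get_data_by_id now returns
assert restored.shape == (768,)
assert np.array_equal(embedding, restored)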
