From d3e7ae752bfb9fa1f20a69eadfd8711adc518628 Mon Sep 17 00:00:00 2001 From: Alex Petenchea Date: Wed, 9 Jul 2025 04:49:28 +0300 Subject: [PATCH 01/14] Skipping _db prefix when using /_open/auth (#57) --- arangoasync/connection.py | 12 +++++++++--- tests/test_connection.py | 6 ++++++ 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/arangoasync/connection.py b/arangoasync/connection.py index f404248..21fa756 100644 --- a/arangoasync/connection.py +++ b/arangoasync/connection.py @@ -160,11 +160,16 @@ def compress_request(self, request: Request) -> bool: return result - async def process_request(self, request: Request) -> Response: + async def process_request( + self, + request: Request, + skip_db_prefix: bool = False, + ) -> Response: """Process request, potentially trying multiple hosts. Args: request (Request): Request object. + skip_db_prefix (bool): If `True`, do not prepend the database endpoint. Returns: Response: Response object. @@ -173,7 +178,8 @@ async def process_request(self, request: Request) -> Response: ConnectionAbortedError: If it can't connect to host(s) within limit. 
""" - request.endpoint = f"{self._db_endpoint}{request.endpoint}" + if not skip_db_prefix: + request.endpoint = f"{self._db_endpoint}{request.endpoint}" host_index = self._host_resolver.get_host_index() for tries in range(self._host_resolver.max_tries): try: @@ -376,7 +382,7 @@ async def refresh_token(self) -> None: ) try: - resp = await self.process_request(request) + resp = await self.process_request(request, skip_db_prefix=True) except ClientConnectionAbortedError as e: raise JWTRefreshError(str(e)) from e except ServerConnectionError as e: diff --git a/tests/test_connection.py b/tests/test_connection.py index 568815c..e053e58 100644 --- a/tests/test_connection.py +++ b/tests/test_connection.py @@ -223,6 +223,12 @@ async def test_JwtConnection_ping_success( status_code = await connection1.ping() assert status_code == 200 + # Refresh the token + await connection3.refresh_token() + status_code = await connection1.ping() + assert status_code == 200 + assert connection3.token != connection1.token + @pytest.mark.asyncio async def test_JwtSuperuserConnection_ping_success( From 52493cb1a500ab340a720286f2b7b7bb547191e1 Mon Sep 17 00:00:00 2001 From: Alex Petenchea Date: Mon, 14 Jul 2025 10:15:27 +0300 Subject: [PATCH 02/14] Refactored request to skip db prefix (#58) --- arangoasync/connection.py | 7 +++---- arangoasync/database.py | 4 +++- arangoasync/request.py | 5 +++++ 3 files changed, 11 insertions(+), 5 deletions(-) diff --git a/arangoasync/connection.py b/arangoasync/connection.py index 21fa756..5fa6363 100644 --- a/arangoasync/connection.py +++ b/arangoasync/connection.py @@ -163,13 +163,11 @@ def compress_request(self, request: Request) -> bool: async def process_request( self, request: Request, - skip_db_prefix: bool = False, ) -> Response: """Process request, potentially trying multiple hosts. Args: request (Request): Request object. - skip_db_prefix (bool): If `True`, do not prepend the database endpoint. Returns: Response: Response object. 
@@ -178,7 +176,7 @@ async def process_request( ConnectionAbortedError: If it can't connect to host(s) within limit. """ - if not skip_db_prefix: + if request.prefix_needed: request.endpoint = f"{self._db_endpoint}{request.endpoint}" host_index = self._host_resolver.get_host_index() for tries in range(self._host_resolver.max_tries): @@ -379,10 +377,11 @@ async def refresh_token(self) -> None: method=Method.POST, endpoint="/_open/auth", data=auth.encode("utf-8"), + prefix_needed=False, ) try: - resp = await self.process_request(request, skip_db_prefix=True) + resp = await self.process_request(request) except ClientConnectionAbortedError as e: raise JWTRefreshError(str(e)) from e except ServerConnectionError as e: diff --git a/arangoasync/database.py b/arangoasync/database.py index dbcc319..c188290 100644 --- a/arangoasync/database.py +++ b/arangoasync/database.py @@ -2022,7 +2022,9 @@ async def reload_jwt_secrets(self) -> Result[Json]: References: - `hot-reload-the-jwt-secrets-from-disk `__ """ # noqa: 501 - request = Request(method=Method.POST, endpoint="/_admin/server/jwt") + request = Request( + method=Method.POST, endpoint="/_admin/server/jwt", prefix_needed=False + ) def response_handler(resp: Response) -> Json: if not resp.is_success: diff --git a/arangoasync/request.py b/arangoasync/request.py index 951c9e9..6bd629d 100644 --- a/arangoasync/request.py +++ b/arangoasync/request.py @@ -33,6 +33,7 @@ class Request: params (dict | None): URL parameters. data (bytes | None): Request payload. auth (Auth | None): Authentication. + prefix_needed (bool): Whether the request needs a prefix (e.g., database name). Attributes: method (Method): HTTP method. @@ -41,6 +42,7 @@ class Request: params (dict | None): URL parameters. data (bytes | None): Request payload. auth (Auth | None): Authentication. + prefix_needed (bool): Whether the request needs a prefix (e.g., database name). 
""" __slots__ = ( @@ -50,6 +52,7 @@ class Request: "params", "data", "auth", + "prefix_needed", ) def __init__( @@ -60,6 +63,7 @@ def __init__( params: Optional[Params] = None, data: Optional[bytes | str] = None, auth: Optional[Auth] = None, + prefix_needed: bool = True, ) -> None: self.method: Method = method self.endpoint: str = endpoint @@ -67,6 +71,7 @@ def __init__( self.params: Params = params or dict() self.data: Optional[bytes | str] = data self.auth: Optional[Auth] = auth + self.prefix_needed = prefix_needed def normalized_headers(self) -> RequestHeaders: """Normalize request headers. From e014bf850b91f10c4b09b092dcdeced6c871b0f8 Mon Sep 17 00:00:00 2001 From: Alex Petenchea Date: Mon, 28 Jul 2025 19:45:41 +0200 Subject: [PATCH 03/14] Collection API completeness (#59) * Adding /figures support * Adding support for /responsibleShard * Adding support for /shards * Adding support for /revision * Adding support for /checksum * Added support for /key-generators * Skipping part of test in 3.11 * Adding configure method * Adding renaming method * recalculate-the-document-count-of-a-collection * compact-a-collection --- arangoasync/collection.py | 322 +++++++++++++++++++++++++++++++++++++- arangoasync/database.py | 24 +++ arangoasync/exceptions.py | 48 +++++- arangoasync/typings.py | 142 ++++++++++++++++- tests/test_collection.py | 83 +++++++++- tests/test_database.py | 15 +- tests/test_typings.py | 60 +++++++ 7 files changed, 677 insertions(+), 17 deletions(-) diff --git a/arangoasync/collection.py b/arangoasync/collection.py index 810ee06..e3d12ee 100644 --- a/arangoasync/collection.py +++ b/arangoasync/collection.py @@ -16,7 +16,16 @@ HTTP_PRECONDITION_FAILED, ) from arangoasync.exceptions import ( + CollectionChecksumError, + CollectionCompactError, + CollectionConfigureError, CollectionPropertiesError, + CollectionRecalculateCountError, + CollectionRenameError, + CollectionResponsibleShardError, + CollectionRevisionError, + CollectionShardsError, + 
CollectionStatisticsError, CollectionTruncateError, DocumentCountError, DocumentDeleteError, @@ -40,7 +49,9 @@ from arangoasync.result import Result from arangoasync.serialization import Deserializer, Serializer from arangoasync.typings import ( + CollectionInfo, CollectionProperties, + CollectionStatistics, IndexProperties, Json, Jsons, @@ -481,6 +492,26 @@ def response_handler(resp: Response) -> bool: return await self._executor.execute(request, response_handler) + async def recalculate_count(self) -> None: + """Recalculate the document count. + + Raises: + CollectionRecalculateCountError: If re-calculation fails. + + References: + - `recalculate-the-document-count-of-a-collection `__ + """ # noqa: E501 + request = Request( + method=Method.PUT, + endpoint=f"/_api/collection/{self.name}/recalculateCount", + ) + + def response_handler(resp: Response) -> None: + if not resp.is_success: + raise CollectionRecalculateCountError(resp, request) + + await self._executor.execute(request, response_handler) + async def properties(self) -> Result[CollectionProperties]: """Return the full properties of the current collection. @@ -501,7 +532,129 @@ async def properties(self) -> Result[CollectionProperties]: def response_handler(resp: Response) -> CollectionProperties: if not resp.is_success: raise CollectionPropertiesError(resp, request) - return CollectionProperties(self._executor.deserialize(resp.raw_body)) + return CollectionProperties(self.deserializer.loads(resp.raw_body)) + + return await self._executor.execute(request, response_handler) + + async def configure( + self, + cache_enabled: Optional[bool] = None, + computed_values: Optional[Jsons] = None, + replication_factor: Optional[int | str] = None, + schema: Optional[Json] = None, + wait_for_sync: Optional[bool] = None, + write_concern: Optional[int] = None, + ) -> Result[CollectionProperties]: + """Changes the properties of a collection. + + Only the provided attributes are updated. 
+ + Args: + cache_enabled (bool | None): Whether the in-memory hash cache + for documents should be enabled for this collection. + computed_values (list | None): An optional list of objects, each + representing a computed value. + replication_factor (int | None): In a cluster, this attribute determines + how many copies of each shard are kept on different DB-Servers. + For SatelliteCollections, it needs to be the string "satellite". + schema (dict | None): The configuration of the collection-level schema + validation for documents. + wait_for_sync (bool | None): If set to `True`, the data is synchronized + to disk before returning from a document create, update, replace or + removal operation. + write_concern (int | None): Determines how many copies of each shard are + required to be in sync on the different DB-Servers. + + Returns: + CollectionProperties: Properties. + + Raises: + CollectionConfigureError: If configuration fails. + + References: + - `change-the-properties-of-a-collection `__ + """ # noqa: E501 + data: Json = {} + if cache_enabled is not None: + data["cacheEnabled"] = cache_enabled + if computed_values is not None: + data["computedValues"] = computed_values + if replication_factor is not None: + data["replicationFactor"] = replication_factor + if schema is not None: + data["schema"] = schema + if wait_for_sync is not None: + data["waitForSync"] = wait_for_sync + if write_concern is not None: + data["writeConcern"] = write_concern + request = Request( + method=Method.PUT, + endpoint=f"/_api/collection/{self.name}/properties", + data=self.serializer.dumps(data), + ) + + def response_handler(resp: Response) -> CollectionProperties: + if not resp.is_success: + raise CollectionConfigureError(resp, request) + return CollectionProperties(self.deserializer.loads(resp.raw_body)) + + return await self._executor.execute(request, response_handler) + + async def rename(self, new_name: str) -> None: + """Rename the collection. 
+ + Renames may not be reflected immediately in async execution, batch + execution or transactions. It is recommended to initialize new API + wrappers after a rename. + + Note: + Renaming collections is not supported in cluster deployments. + + Args: + new_name (str): New collection name. + + Raises: + CollectionRenameError: If rename fails. + + References: + - `rename-a-collection `__ + """ # noqa: E501 + data: Json = {"name": new_name} + request = Request( + method=Method.PUT, + endpoint=f"/_api/collection/{self.name}/rename", + data=self.serializer.dumps(data), + ) + + def response_handler(resp: Response) -> None: + if not resp.is_success: + raise CollectionRenameError(resp, request) + self._name = new_name + self._id_prefix = f"{new_name}/" + + await self._executor.execute(request, response_handler) + + async def compact(self) -> Result[CollectionInfo]: + """Compact a collection. + + Returns: + CollectionInfo: Collection information. + + Raises: + CollectionCompactError: If compaction fails. + + References: + - `compact-a-collection `__ + """ # noqa: E501 + request = Request( + method=Method.PUT, + endpoint=f"/_api/collection/{self.name}/compact", + ) + + def response_handler(resp: Response) -> CollectionInfo: + if not resp.is_success: + raise CollectionCompactError(resp, request) + return CollectionInfo(self.deserializer.loads(resp.raw_body)) return await self._executor.execute(request, response_handler) @@ -552,7 +705,10 @@ async def count(self) -> Result[int]: Raises: DocumentCountError: If retrieval fails. - """ + + References: + - `get-the-document-count-of-a-collection `__ + """ # noqa: E501 request = Request( method=Method.GET, endpoint=f"/_api/collection/{self.name}/count" ) @@ -565,6 +721,158 @@ def response_handler(resp: Response) -> int: return await self._executor.execute(request, response_handler) + async def statistics(self) -> Result[CollectionStatistics]: + """Get additional statistical information about the collection. 
+ + Returns: + CollectionStatistics: Collection statistics. + + Raises: + CollectionStatisticsError: If retrieval fails. + + References: + - `get-the-collection-statistics `__ + """ # noqa: E501 + request = Request( + method=Method.GET, + endpoint=f"/_api/collection/{self.name}/figures", + ) + + def response_handler(resp: Response) -> CollectionStatistics: + if not resp.is_success: + raise CollectionStatisticsError(resp, request) + return CollectionStatistics(self.deserializer.loads(resp.raw_body)) + + return await self._executor.execute(request, response_handler) + + async def responsible_shard(self, document: Json) -> Result[str]: + """Return the ID of the shard responsible for given document. + + If the document does not exist, return the shard that would be + responsible. + + Args: + document (dict): Document body with "_key" field. + + Returns: + str: Shard ID. + + Raises: + CollectionResponsibleShardError: If retrieval fails. + + References: + - `get-the-responsible-shard-for-a-document `__ + """ # noqa: E501 + request = Request( + method=Method.PUT, + endpoint=f"/_api/collection/{self.name}/responsibleShard", + data=self.serializer.dumps(document), + ) + + def response_handler(resp: Response) -> str: + if resp.is_success: + body = self.deserializer.loads(resp.raw_body) + return cast(str, body["shardId"]) + raise CollectionResponsibleShardError(resp, request) + + return await self._executor.execute(request, response_handler) + + async def shards(self, details: Optional[bool] = None) -> Result[Json]: + """Return collection shards and properties. + + Available only in a cluster setup. + + Args: + details (bool | None): If set to `True`, include responsible + servers for these shards. + + Returns: + dict: Collection shards. + + Raises: + CollectionShardsError: If retrieval fails. 
+ + References: + - `get-the-shard-ids-of-a-collection `__ + """ # noqa: E501 + params: Params = {} + if details is not None: + params["details"] = details + + request = Request( + method=Method.GET, + endpoint=f"/_api/collection/{self.name}/shards", + params=params, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise CollectionShardsError(resp, request) + return cast(Json, self.deserializer.loads(resp.raw_body)["shards"]) + + return await self._executor.execute(request, response_handler) + + async def revision(self) -> Result[str]: + """Return collection revision. + + Returns: + str: Collection revision. + + Raises: + CollectionRevisionError: If retrieval fails. + + References: + - `get-the-collection-revision-id `__ + """ # noqa: E501 + request = Request( + method=Method.GET, + endpoint=f"/_api/collection/{self.name}/revision", + ) + + def response_handler(resp: Response) -> str: + if not resp.is_success: + raise CollectionRevisionError(resp, request) + return cast(str, self.deserializer.loads(resp.raw_body)["revision"]) + + return await self._executor.execute(request, response_handler) + + async def checksum( + self, with_rev: Optional[bool] = None, with_data: Optional[bool] = None + ) -> Result[str]: + """Calculate collection checksum. + + Args: + with_rev (bool | None): Include document revisions in checksum calculation. + with_data (bool | None): Include document data in checksum calculation. + + Returns: + str: Collection checksum. + + Raises: + CollectionChecksumError: If retrieval fails. 
+ + References: + - `get-the-collection-checksum `__ + """ # noqa: E501 + params: Params = {} + if with_rev is not None: + params["withRevision"] = with_rev + if with_data is not None: + params["withData"] = with_data + + request = Request( + method=Method.GET, + endpoint=f"/_api/collection/{self.name}/checksum", + params=params, + ) + + def response_handler(resp: Response) -> str: + if not resp.is_success: + raise CollectionChecksumError(resp, request) + return cast(str, self.deserializer.loads(resp.raw_body)["checksum"]) + + return await self._executor.execute(request, response_handler) + async def has( self, document: str | Json, @@ -1444,9 +1752,9 @@ async def insert( def response_handler(resp: Response) -> bool | Json: if resp.is_success: - if silent is True: + if silent: return True - return self._executor.deserialize(resp.raw_body) + return self.deserializer.loads(resp.raw_body) msg: Optional[str] = None if resp.status_code == HTTP_BAD_PARAMETER: msg = ( @@ -1551,7 +1859,7 @@ def response_handler(resp: Response) -> bool | Json: if resp.is_success: if silent is True: return True - return self._executor.deserialize(resp.raw_body) + return self.deserializer.loads(resp.raw_body) msg: Optional[str] = None if resp.status_code == HTTP_PRECONDITION_FAILED: raise DocumentRevisionError(resp, request) @@ -1641,7 +1949,7 @@ def response_handler(resp: Response) -> bool | Json: if resp.is_success: if silent is True: return True - return self._executor.deserialize(resp.raw_body) + return self.deserializer.loads(resp.raw_body) msg: Optional[str] = None if resp.status_code == HTTP_PRECONDITION_FAILED: raise DocumentRevisionError(resp, request) @@ -1726,7 +2034,7 @@ def response_handler(resp: Response) -> bool | Json: if resp.is_success: if silent is True: return True - return self._executor.deserialize(resp.raw_body) + return self.deserializer.loads(resp.raw_body) msg: Optional[str] = None if resp.status_code == HTTP_PRECONDITION_FAILED: raise DocumentRevisionError(resp, 
request) diff --git a/arangoasync/database.py b/arangoasync/database.py index c188290..578222f 100644 --- a/arangoasync/database.py +++ b/arangoasync/database.py @@ -22,6 +22,7 @@ AsyncJobListError, CollectionCreateError, CollectionDeleteError, + CollectionKeyGeneratorsError, CollectionListError, DatabaseCreateError, DatabaseDeleteError, @@ -695,6 +696,29 @@ def response_handler(resp: Response) -> bool: return await self._executor.execute(request, response_handler) + async def key_generators(self) -> Result[List[str]]: + """Returns the available key generators for collections. + + Returns: + list: List of available key generators. + + Raises: + CollectionKeyGeneratorsError: If retrieval fails. + + References: + - `get-the-available-key-generators `__ + """ # noqa: E501 + request = Request(method=Method.GET, endpoint="/_api/key-generators") + + def response_handler(resp: Response) -> List[str]: + if not resp.is_success: + raise CollectionKeyGeneratorsError(resp, request) + return cast( + List[str], self.deserializer.loads(resp.raw_body)["keyGenerators"] + ) + + return await self._executor.execute(request, response_handler) + async def has_document( self, document: str | Json, diff --git a/arangoasync/exceptions.py b/arangoasync/exceptions.py index e052fd4..5de6ea4 100644 --- a/arangoasync/exceptions.py +++ b/arangoasync/exceptions.py @@ -183,10 +183,26 @@ class CollectionCreateError(ArangoServerError): """Failed to create collection.""" +class CollectionChecksumError(ArangoServerError): + """Failed to retrieve collection checksum.""" + + +class CollectionConfigureError(ArangoServerError): + """Failed to configure collection properties.""" + + +class CollectionCompactError(ArangoServerError): + """Failed to compact collection.""" + + class CollectionDeleteError(ArangoServerError): """Failed to delete collection.""" +class CollectionKeyGeneratorsError(ArangoServerError): + """Failed to retrieve key generators.""" + + class CollectionListError(ArangoServerError): 
"""Failed to retrieve collections.""" @@ -195,18 +211,42 @@ class CollectionPropertiesError(ArangoServerError): """Failed to retrieve collection properties.""" -class ClientConnectionAbortedError(ArangoClientError): - """The connection was aborted.""" +class CollectionRecalculateCountError(ArangoServerError): + """Failed to recalculate document count.""" -class ClientConnectionError(ArangoClientError): - """The request was unable to reach the server.""" +class CollectionRenameError(ArangoServerError): + """Failed to rename collection.""" + + +class CollectionResponsibleShardError(ArangoServerError): + """Failed to retrieve responsible shard.""" + + +class CollectionRevisionError(ArangoServerError): + """Failed to retrieve collection revision.""" + + +class CollectionShardsError(ArangoServerError): + """Failed to retrieve collection shards.""" + + +class CollectionStatisticsError(ArangoServerError): + """Failed to retrieve collection statistics.""" class CollectionTruncateError(ArangoServerError): """Failed to truncate collection.""" +class ClientConnectionAbortedError(ArangoClientError): + """The connection was aborted.""" + + +class ClientConnectionError(ArangoClientError): + """The request was unable to reach the server.""" + + class CursorCloseError(ArangoServerError): """Failed to delete the cursor result from server.""" diff --git a/arangoasync/typings.py b/arangoasync/typings.py index 280e27e..d49411d 100644 --- a/arangoasync/typings.py +++ b/arangoasync/typings.py @@ -791,8 +791,6 @@ def compatibility_formatter(data: Json) -> Json: result["deleted"] = data["deleted"] if "syncByRevision" in data: result["sync_by_revision"] = data["syncByRevision"] - if "tempObjectId" in data: - result["temp_object_id"] = data["tempObjectId"] if "usesRevisionsAsDocumentIds" in data: result["rev_as_id"] = data["usesRevisionsAsDocumentIds"] if "isDisjoint" in data: @@ -819,6 +817,146 @@ def format(self, formatter: Optional[Formatter] = None) -> Json: return 
self.compatibility_formatter(self._data) +class CollectionStatistics(JsonWrapper): + """Statistical information about the collection. + + Example: + .. code-block:: json + + { + "figures" : { + "indexes" : { + "count" : 1, + "size" : 1234 + }, + "documentsSize" : 5601, + "cacheInUse" : false, + "cacheSize" : 0, + "cacheUsage" : 0, + "engine" : { + "documents" : 1, + "indexes" : [ + { + "type" : "primary", + "id" : 0, + "count" : 1 + } + ] + } + }, + "writeConcern" : 1, + "waitForSync" : false, + "usesRevisionsAsDocumentIds" : true, + "syncByRevision" : true, + "statusString" : "loaded", + "id" : "69123", + "isSmartChild" : false, + "schema" : null, + "name" : "products", + "type" : 2, + "status" : 3, + "count" : 1, + "cacheEnabled" : false, + "isSystem" : false, + "internalValidatorType" : 0, + "globallyUniqueId" : "hB7C02EE43DCE/69123", + "keyOptions" : { + "allowUserKeys" : true, + "type" : "traditional", + "lastValue" : 69129 + }, + "computedValues" : null, + "objectId" : "69124" + } + + References: + - `get-the-collection-statistics `__ + """ # noqa: E501 + + def __init__(self, data: Json) -> None: + super().__init__(data) + + @property + def figures(self) -> Json: + return cast(Json, self._data.get("figures")) + + @property + def write_concern(self) -> Optional[int]: + return self._data.get("writeConcern") + + @property + def wait_for_sync(self) -> Optional[bool]: + return self._data.get("waitForSync") + + @property + def use_revisions_as_document_ids(self) -> Optional[bool]: + return self._data.get("usesRevisionsAsDocumentIds") + + @property + def sync_by_revision(self) -> Optional[bool]: + return self._data.get("syncByRevision") + + @property + def status_string(self) -> Optional[str]: + return self._data.get("statusString") + + @property + def id(self) -> str: + return self._data["id"] # type: ignore[no-any-return] + + @property + def is_smart_child(self) -> bool: + return self._data["isSmartChild"] # type: ignore[no-any-return] + + @property + def 
schema(self) -> Optional[Json]: + return self._data.get("schema") + + @property + def name(self) -> str: + return self._data["name"] # type: ignore[no-any-return] + + @property + def type(self) -> CollectionType: + return CollectionType.from_int(self._data["type"]) + + @property + def status(self) -> CollectionStatus: + return CollectionStatus.from_int(self._data["status"]) + + @property + def count(self) -> int: + return self._data["count"] # type: ignore[no-any-return] + + @property + def cache_enabled(self) -> Optional[bool]: + return self._data.get("cacheEnabled") + + @property + def is_system(self) -> bool: + return self._data["isSystem"] # type: ignore[no-any-return] + + @property + def internal_validator_type(self) -> Optional[int]: + return self._data.get("internalValidatorType") + + @property + def globally_unique_id(self) -> str: + return self._data["globallyUniqueId"] # type: ignore[no-any-return] + + @property + def key_options(self) -> KeyOptions: + return KeyOptions(self._data["keyOptions"]) + + @property + def computed_values(self) -> Optional[Json]: + return self._data.get("computedValues") + + @property + def object_id(self) -> str: + return self._data["objectId"] # type: ignore[no-any-return] + + class IndexProperties(JsonWrapper): """Properties of an index. 
diff --git a/tests/test_collection.py b/tests/test_collection.py index d9214dd..fb8d7ba 100644 --- a/tests/test_collection.py +++ b/tests/test_collection.py @@ -4,7 +4,16 @@ from arangoasync.errno import DATA_SOURCE_NOT_FOUND, INDEX_NOT_FOUND from arangoasync.exceptions import ( + CollectionChecksumError, + CollectionCompactError, + CollectionConfigureError, CollectionPropertiesError, + CollectionRecalculateCountError, + CollectionRenameError, + CollectionResponsibleShardError, + CollectionRevisionError, + CollectionShardsError, + CollectionStatisticsError, CollectionTruncateError, DocumentCountError, IndexCreateError, @@ -13,6 +22,7 @@ IndexListError, IndexLoadError, ) +from tests.helpers import generate_col_name def test_collection_attributes(db, doc_col): @@ -22,7 +32,9 @@ def test_collection_attributes(db, doc_col): @pytest.mark.asyncio -async def test_collection_misc_methods(doc_col, bad_col): +async def test_collection_misc_methods(doc_col, bad_col, docs, cluster): + doc = await doc_col.insert(docs[0]) + # Properties properties = await doc_col.properties() assert properties.name == doc_col.name @@ -31,6 +43,75 @@ async def test_collection_misc_methods(doc_col, bad_col): with pytest.raises(CollectionPropertiesError): await bad_col.properties() + # Configure + wfs = not properties.wait_for_sync + new_properties = await doc_col.configure(wait_for_sync=wfs) + assert new_properties.wait_for_sync == wfs + with pytest.raises(CollectionConfigureError): + await bad_col.configure(wait_for_sync=wfs) + + # Statistics + statistics = await doc_col.statistics() + assert statistics.name == doc_col.name + assert "figures" in statistics + with pytest.raises(CollectionStatisticsError): + await bad_col.statistics() + + # Shards + if cluster: + shard = await doc_col.responsible_shard(doc) + assert isinstance(shard, str) + with pytest.raises(CollectionResponsibleShardError): + await bad_col.responsible_shard(doc) + shards = await doc_col.shards(details=True) + assert 
isinstance(shards, dict) + with pytest.raises(CollectionShardsError): + await bad_col.shards() + + # Revision + revision = await doc_col.revision() + assert isinstance(revision, str) + with pytest.raises(CollectionRevisionError): + await bad_col.revision() + + # Checksum + checksum = await doc_col.checksum(with_rev=True, with_data=True) + assert isinstance(checksum, str) + with pytest.raises(CollectionChecksumError): + await bad_col.checksum() + + # Recalculate count + with pytest.raises(CollectionRecalculateCountError): + await bad_col.recalculate_count() + await doc_col.recalculate_count() + + # Compact + with pytest.raises(CollectionCompactError): + await bad_col.compact() + res = await doc_col.compact() + assert res.name == doc_col.name + + +@pytest.mark.asyncio +async def test_collection_rename(cluster, db, bad_col, docs): + if cluster: + pytest.skip("Renaming collections is not supported in cluster deployments.") + + with pytest.raises(CollectionRenameError): + await bad_col.rename("new_name") + + col_name = generate_col_name() + new_name = generate_col_name() + try: + await db.create_collection(col_name) + col = db.collection(col_name) + await col.rename(new_name) + assert col.name == new_name + doc = await col.insert(docs[0]) + assert col.get_col_name(doc) == new_name + finally: + db.delete_collection(new_name, ignore_missing=True) + @pytest.mark.asyncio async def test_collection_index(doc_col, bad_col, cluster): diff --git a/tests/test_database.py b/tests/test_database.py index eb7daa3..7058ac1 100644 --- a/tests/test_database.py +++ b/tests/test_database.py @@ -1,11 +1,13 @@ import asyncio import pytest +from packaging import version from arangoasync.collection import StandardCollection from arangoasync.exceptions import ( CollectionCreateError, CollectionDeleteError, + CollectionKeyGeneratorsError, CollectionListError, DatabaseCreateError, DatabaseDeleteError, @@ -21,7 +23,7 @@ @pytest.mark.asyncio -async def test_database_misc_methods(sys_db, db, 
bad_db, cluster): +async def test_database_misc_methods(sys_db, db, bad_db, cluster, db_version): # Status status = await sys_db.status() assert status["server"] == "arango" @@ -50,11 +52,18 @@ async def test_database_misc_methods(sys_db, db, bad_db, cluster): await bad_db.reload_jwt_secrets() # Version - version = await sys_db.version() - assert version["version"].startswith("3.") + v = await sys_db.version() + assert v["version"].startswith("3.") with pytest.raises(ServerVersionError): await bad_db.version() + # key generators + if db_version >= version.parse("3.12.0"): + key_generators = await db.key_generators() + assert isinstance(key_generators, list) + with pytest.raises(CollectionKeyGeneratorsError): + await bad_db.key_generators() + @pytest.mark.asyncio async def test_create_drop_database( diff --git a/tests/test_typings.py b/tests/test_typings.py index fd04fa1..3b4e5e2 100644 --- a/tests/test_typings.py +++ b/tests/test_typings.py @@ -2,6 +2,7 @@ from arangoasync.typings import ( CollectionInfo, + CollectionStatistics, CollectionStatus, CollectionType, EdgeDefinitionOptions, @@ -386,3 +387,62 @@ def test_EdgeDefinitionOptions(): ) assert options.satellites == ["col1", "col2"] + + +def test_CollectionStatistics(): + data = { + "figures": { + "indexes": {"count": 1, "size": 1234}, + "documentsSize": 5601, + "cacheInUse": False, + "cacheSize": 0, + "cacheUsage": 0, + }, + "writeConcern": 1, + "waitForSync": False, + "usesRevisionsAsDocumentIds": True, + "syncByRevision": True, + "statusString": "loaded", + "id": "69123", + "isSmartChild": False, + "schema": None, + "name": "products", + "type": 2, + "status": 3, + "count": 1, + "cacheEnabled": False, + "isSystem": False, + "internalValidatorType": 0, + "globallyUniqueId": "hB7C02EE43DCE/69123", + "keyOptions": { + "allowUserKeys": True, + "type": "traditional", + "lastValue": 69129, + }, + "computedValues": None, + "objectId": "69124", + } + + stats = CollectionStatistics(data) + + assert stats.figures == 
data["figures"] + assert stats.write_concern == 1 + assert stats.wait_for_sync is False + assert stats.use_revisions_as_document_ids is True + assert stats.sync_by_revision is True + assert stats.status_string == "loaded" + assert stats.id == "69123" + assert stats.is_smart_child is False + assert stats.schema is None + assert stats.name == "products" + assert stats.type == CollectionType.DOCUMENT + assert stats.status == CollectionStatus.LOADED + assert stats.count == 1 + assert stats.cache_enabled is False + assert stats.is_system is False + assert stats.internal_validator_type == 0 + assert stats.globally_unique_id == "hB7C02EE43DCE/69123" + assert isinstance(stats.key_options, KeyOptions) + assert stats.key_options["type"] == "traditional" + assert stats.computed_values is None + assert stats.object_id == "69124" From 325c4e08268d22d91cf92793d0266f0edd9e1734 Mon Sep 17 00:00:00 2001 From: Alex Petenchea Date: Mon, 4 Aug 2025 13:53:36 +0800 Subject: [PATCH 04/14] Hot Backup API (#60) * Hot Backup API * Hot Backup docs * Hot Backup only tested in cluster * Hot Backup only tested for enterprise * Minimize backup tests --- arangoasync/backup.py | 295 ++++++++++++++++++++++++++++++++++++++ arangoasync/database.py | 10 ++ arangoasync/exceptions.py | 24 ++++ docs/backup.rst | 78 ++++++++++ docs/index.rst | 1 + docs/specs.rst | 3 + tests/test_backup.py | 57 ++++++++ 7 files changed, 468 insertions(+) create mode 100644 arangoasync/backup.py create mode 100644 docs/backup.rst create mode 100644 tests/test_backup.py diff --git a/arangoasync/backup.py b/arangoasync/backup.py new file mode 100644 index 0000000..75a26a6 --- /dev/null +++ b/arangoasync/backup.py @@ -0,0 +1,295 @@ +__all__ = ["Backup"] + +from numbers import Number +from typing import Optional, cast + +from arangoasync.exceptions import ( + BackupCreateError, + BackupDeleteError, + BackupDownloadError, + BackupGetError, + BackupRestoreError, + BackupUploadError, +) +from arangoasync.executor import 
class Backup:
    """Hot Backup API wrapper.

    Wraps the ``/_admin/backup/*`` endpoints, which take near-instantaneous,
    consistent snapshots of an entire deployment (create, list, restore,
    delete, upload, download).
    """

    def __init__(self, executor: ApiExecutor) -> None:
        self._executor = executor

    @property
    def serializer(self) -> Serializer[Json]:
        """Return the serializer."""
        return self._executor.serializer

    @property
    def deserializer(self) -> Deserializer[Json, Jsons]:
        """Return the deserializer."""
        return self._executor.deserializer

    async def get(self, backup_id: Optional[str] = None) -> Result[Json]:
        """Return backup details.

        Args:
            backup_id (str | None): If set, the returned list is restricted to the
                backup with the given id.

        Returns:
            dict: Backup details.

        Raises:
            BackupGetError: If the operation fails.

        References:
            - `list-backups <https://docs.arangodb.com/stable/develop/http-api/hot-backups/#list-backups>`__
        """  # noqa: E501
        data: Json = {}
        if backup_id is not None:
            data["id"] = backup_id

        request = Request(
            method=Method.POST,
            endpoint="/_admin/backup/list",
            # Only send a body when an id filter was given.
            data=self.serializer.dumps(data) if data else None,
            prefix_needed=False,
        )

        def response_handler(resp: Response) -> Json:
            if not resp.is_success:
                raise BackupGetError(resp, request)
            result: Json = self.deserializer.loads(resp.raw_body)
            return cast(Json, result["result"])

        return await self._executor.execute(request, response_handler)

    async def create(
        self,
        label: Optional[str] = None,
        allow_inconsistent: Optional[bool] = None,
        force: Optional[bool] = None,
        timeout: Optional[Number] = None,
    ) -> Result[Json]:
        """Create a backup when the global write lock can be obtained.

        Args:
            label (str | None): Label for this backup. If not specified, a UUID is used.
            allow_inconsistent (bool | None): Allow inconsistent backup when the global
                transaction lock cannot be acquired before timeout.
            force (bool | None): Forcefully abort all running transactions to ensure a
                consistent backup when the global transaction lock cannot be
                acquired before timeout. Default (and highly recommended) value
                is `False`.
            timeout (float | None): The time in seconds that the operation tries to
                get a consistent snapshot.

        Returns:
            dict: Backup information.

        Raises:
            BackupCreateError: If the backup creation fails.

        References:
            - `create-backup <https://docs.arangodb.com/stable/develop/http-api/hot-backups/#create-backup>`__
        """  # noqa: E501
        data: Json = {}
        if label is not None:
            data["label"] = label
        if allow_inconsistent is not None:
            data["allowInconsistent"] = allow_inconsistent
        if force is not None:
            data["force"] = force
        if timeout is not None:
            data["timeout"] = timeout

        request = Request(
            method=Method.POST,
            endpoint="/_admin/backup/create",
            data=self.serializer.dumps(data),
            prefix_needed=False,
        )

        def response_handler(resp: Response) -> Json:
            if not resp.is_success:
                raise BackupCreateError(resp, request)
            result: Json = self.deserializer.loads(resp.raw_body)
            return cast(Json, result["result"])

        return await self._executor.execute(request, response_handler)

    async def restore(self, backup_id: str) -> Result[Json]:
        """Restore a local backup.

        Args:
            backup_id (str): Backup ID.

        Returns:
            dict: Result of the restore operation.

        Raises:
            BackupRestoreError: If the restore operation fails.

        References:
            - `restore-backup <https://docs.arangodb.com/stable/develop/http-api/hot-backups/#restore-backup>`__
        """  # noqa: E501
        data: Json = {"id": backup_id}
        request = Request(
            method=Method.POST,
            endpoint="/_admin/backup/restore",
            data=self.serializer.dumps(data),
            prefix_needed=False,
        )

        def response_handler(resp: Response) -> Json:
            if not resp.is_success:
                raise BackupRestoreError(resp, request)
            result: Json = self.deserializer.loads(resp.raw_body)
            return cast(Json, result["result"])

        return await self._executor.execute(request, response_handler)

    async def delete(self, backup_id: str) -> None:
        """Delete a backup.

        Args:
            backup_id (str): Backup ID.

        Raises:
            BackupDeleteError: If the delete operation fails.

        References:
            - `delete-backup <https://docs.arangodb.com/stable/develop/http-api/hot-backups/#delete-backup>`__
        """  # noqa: E501
        data: Json = {"id": backup_id}
        request = Request(
            method=Method.POST,
            endpoint="/_admin/backup/delete",
            data=self.serializer.dumps(data),
            prefix_needed=False,
        )

        def response_handler(resp: Response) -> None:
            if not resp.is_success:
                raise BackupDeleteError(resp, request)

        await self._executor.execute(request, response_handler)

    async def upload(
        self,
        backup_id: Optional[str] = None,
        repository: Optional[str] = None,
        abort: Optional[bool] = None,
        config: Optional[Json] = None,
        upload_id: Optional[str] = None,
    ) -> Result[Json]:
        """Manage backup uploads.

        Args:
            backup_id (str | None): Backup ID used for scheduling an upload. Mutually
                exclusive with parameter **upload_id**.
            repository (str | None): Remote repository URL (e.g. "local://tmp/backups").
            abort (bool | None): If set to `True`, running upload is aborted. Used with
                parameter **upload_id**.
            config (dict | None): Remote repository configuration. Required for scheduling
                an upload and mutually exclusive with parameter **upload_id**.
            upload_id (str | None): Upload ID. Mutually exclusive with parameters
                **backup_id**, **repository**, and **config**.

        Returns:
            dict: Upload details.

        Raises:
            BackupUploadError: If the upload operation fails.

        References:
            - `upload-a-backup-to-a-remote-repository <https://docs.arangodb.com/stable/develop/http-api/hot-backups/#upload-a-backup-to-a-remote-repository>`__
        """  # noqa: E501
        data: Json = {}
        if upload_id is not None:
            data["uploadId"] = upload_id
        if backup_id is not None:
            data["id"] = backup_id
        if repository is not None:
            data["remoteRepository"] = repository
        if abort is not None:
            data["abort"] = abort
        if config is not None:
            data["config"] = config

        request = Request(
            method=Method.POST,
            endpoint="/_admin/backup/upload",
            data=self.serializer.dumps(data),
            prefix_needed=False,
        )

        def response_handler(resp: Response) -> Json:
            if not resp.is_success:
                raise BackupUploadError(resp, request)
            result: Json = self.deserializer.loads(resp.raw_body)
            return cast(Json, result["result"])

        return await self._executor.execute(request, response_handler)

    async def download(
        self,
        backup_id: Optional[str] = None,
        repository: Optional[str] = None,
        abort: Optional[bool] = None,
        config: Optional[Json] = None,
        download_id: Optional[str] = None,
    ) -> Result[Json]:
        """Manage backup downloads.

        Args:
            backup_id (str | None): Backup ID used for scheduling a download. Mutually
                exclusive with parameter **download_id**.
            repository (str | None): Remote repository URL (e.g. "local://tmp/backups").
            abort (bool | None): If set to `True`, running download is aborted.
            config (dict | None): Remote repository configuration. Required for scheduling
                a download and mutually exclusive with parameter **download_id**.
            download_id (str | None): Download ID. Mutually exclusive with parameters
                **backup_id**, **repository**, and **config**.

        Returns:
            dict: Download details.

        Raises:
            BackupDownloadError: If the download operation fails.

        References:
            - `download-a-backup-from-a-remote-repository <https://docs.arangodb.com/stable/develop/http-api/hot-backups/#download-a-backup-from-a-remote-repository>`__
        """  # noqa: E501
        data: Json = {}
        if download_id is not None:
            data["downloadId"] = download_id
        if backup_id is not None:
            data["id"] = backup_id
        if repository is not None:
            data["remoteRepository"] = repository
        if abort is not None:
            data["abort"] = abort
        if config is not None:
            data["config"] = config

        request = Request(
            method=Method.POST,
            endpoint="/_admin/backup/download",
            data=self.serializer.dumps(data),
            prefix_needed=False,
        )

        def response_handler(resp: Response) -> Json:
            if not resp.is_success:
                raise BackupDownloadError(resp, request)
            result: Json = self.deserializer.loads(resp.raw_body)
            return cast(Json, result["result"])

        return await self._executor.execute(request, response_handler)
diff --git a/arangoasync/exceptions.py b/arangoasync/exceptions.py index 5de6ea4..41644de 100644 --- a/arangoasync/exceptions.py +++ b/arangoasync/exceptions.py @@ -179,6 +179,30 @@ class AuthHeaderError(ArangoClientError): """The authentication header could not be determined.""" +class BackupCreateError(ArangoServerError): + """Failed to create a backup.""" + + +class BackupDeleteError(ArangoServerError): + """Failed to delete a backup.""" + + +class BackupDownloadError(ArangoServerError): + """Failed to download a backup from remote repository.""" + + +class BackupGetError(ArangoServerError): + """Failed to retrieve backup details.""" + + +class BackupRestoreError(ArangoServerError): + """Failed to restore from backup.""" + + +class BackupUploadError(ArangoServerError): + """Failed to upload a backup to remote repository.""" + + class CollectionCreateError(ArangoServerError): """Failed to create collection.""" diff --git a/docs/backup.rst b/docs/backup.rst new file mode 100644 index 0000000..de36041 --- /dev/null +++ b/docs/backup.rst @@ -0,0 +1,78 @@ +Backups +------- + +Hot Backups are near instantaneous consistent snapshots of an entire ArangoDB deployment. +This includes all databases, collections, indexes, Views, graphs, and users at any given time. +For more information, refer to `ArangoDB Manual`_. + +.. _ArangoDB Manual: https://docs.arangodb.com + +**Example:** + +.. code-block:: python + + from arangoasync import ArangoClient + from arangoasync.auth import JwtToken + + # Initialize the client for ArangoDB. + async with ArangoClient(hosts="http://localhost:8529") as client: + token = JwtToken.generate_token(LOGIN_SECRET) + + # Connect to "_system" database as root user. + db = await client.db( + "_system", auth_method="superuser", token=token, verify=True + ) + + # Get the backup API wrapper. + backup = db.backup + + # Create a backup. 
+ result = await backup.create( + label="foo", + allow_inconsistent=True, + force=False, + timeout=1000 + ) + backup_id = result["id"] + + # Retrieve details on all backups + backups = await backup.get() + + # Retrieve details on a specific backup. + details = await backup.get(backup_id=backup_id) + + # Upload a backup to a remote repository. + result = await backup.upload( + backup_id=backup_id, + repository="local://tmp/backups", + config={"local": {"type": "local"}} + ) + upload_id = result["uploadId"] + + # Get status of an upload. + status = await backup.upload(upload_id=upload_id) + + # Abort an upload. + await backup.upload(upload_id=upload_id, abort=True) + + # Download a backup from a remote repository. + result = await backup.download( + backup_id=backup_id, + repository="local://tmp/backups", + config={"local": {"type": "local"}} + ) + download_id = result["downloadId"] + + # Get status of an download. + status = await backup.download(download_id=download_id) + + # Abort an download. + await backup.download(download_id=download_id, abort=True) + + # Restore from a backup. + await backup.restore(backup_id) + + # Delete a backup. + await backup.delete(backup_id) + +See :class:`arangoasync.backup.Backup` for API specification. diff --git a/docs/index.rst b/docs/index.rst index 375303c..1b361fd 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -72,6 +72,7 @@ Contents certificates compression serialization + backup errors errno logging diff --git a/docs/specs.rst b/docs/specs.rst index 9983716..a2b982f 100644 --- a/docs/specs.rst +++ b/docs/specs.rst @@ -28,6 +28,9 @@ python-arango-async. .. automodule:: arangoasync.cursor :members: +.. automodule:: arangoasync.backup + :members: + .. 
automodule:: arangoasync.compression :members: diff --git a/tests/test_backup.py b/tests/test_backup.py new file mode 100644 index 0000000..d2fb07e --- /dev/null +++ b/tests/test_backup.py @@ -0,0 +1,57 @@ +import pytest +from packaging import version + +from arangoasync.client import ArangoClient +from arangoasync.exceptions import ( + BackupCreateError, + BackupDeleteError, + BackupDownloadError, + BackupGetError, + BackupRestoreError, + BackupUploadError, +) + + +@pytest.mark.asyncio +async def test_backup(url, sys_db_name, bad_db, token, enterprise, cluster, db_version): + if not enterprise: + pytest.skip("Backup API is only available in ArangoDB Enterprise Edition") + if not cluster: + pytest.skip("For simplicity, the backup API is only tested in cluster setups") + if db_version < version.parse("3.12.0"): + pytest.skip( + "For simplicity, the backup API is only tested in the latest versions" + ) + + with pytest.raises(BackupCreateError): + await bad_db.backup.create() + with pytest.raises(BackupGetError): + await bad_db.backup.get() + with pytest.raises(BackupRestoreError): + await bad_db.backup.restore("foobar") + with pytest.raises(BackupDeleteError): + await bad_db.backup.delete("foobar") + with pytest.raises(BackupUploadError): + await bad_db.backup.upload() + with pytest.raises(BackupDownloadError): + await bad_db.backup.download() + + async with ArangoClient(hosts=url) as client: + db = await client.db( + sys_db_name, auth_method="superuser", token=token, verify=True + ) + backup = db.backup + result = await backup.create() + backup_id = result["id"] + result = await backup.get() + assert "list" in result + result = await backup.restore(backup_id) + assert "previous" in result + config = {"local": {"type": "local"}} + result = await backup.upload(backup_id, repository="local://tmp", config=config) + assert "uploadId" in result + result = await backup.download( + backup_id, repository="local://tmp", config=config + ) + assert "downloadId" in result + 
await backup.delete(backup_id) From 88338776349da67c91751ae8ac0b9037288ce17c Mon Sep 17 00:00:00 2001 From: Alex Petenchea Date: Tue, 5 Aug 2025 13:21:21 +0800 Subject: [PATCH 05/14] Adding /_api/import (#61) --- arangoasync/collection.py | 102 ++++++++++++++++++++++++++++++++++++++ docs/document.rst | 33 ++++++++++++ tests/test_collection.py | 18 +++++++ 3 files changed, 153 insertions(+) diff --git a/arangoasync/collection.py b/arangoasync/collection.py index e3d12ee..52a9d9e 100644 --- a/arangoasync/collection.py +++ b/arangoasync/collection.py @@ -1578,6 +1578,108 @@ def response_handler( return await self._executor.execute(request, response_handler) + async def import_bulk( + self, + documents: bytes | str, + doc_type: Optional[str] = None, + complete: Optional[bool] = True, + details: Optional[bool] = True, + from_prefix: Optional[str] = None, + to_prefix: Optional[str] = None, + overwrite: Optional[bool] = None, + overwrite_collection_prefix: Optional[bool] = None, + on_duplicate: Optional[str] = None, + wait_for_sync: Optional[bool] = None, + ignore_missing: Optional[bool] = None, + ) -> Result[Json]: + """Load JSON data in bulk into ArangoDB. + + Args: + documents (bytes | str): String representation of the JSON data to import. + doc_type (str | None): Determines how the body of the request is interpreted. + Possible values: "", "documents", "array", "auto". + complete (bool | None): If set to `True`, the whole import fails if any error occurs. + Otherwise, the import continues even if some documents are invalid and cannot + be imported, skipping the problematic documents. + details (bool | None): If set to `True`, the result includes a `details` + attribute with information about documents that could not be imported. + from_prefix (str | None): String prefix prepended to the value of "_from" + field in each edge document inserted. For example, prefix "foo" + prepended to "_from": "bar" will result in "_from": "foo/bar". + Applies only to edge collections. 
+ to_prefix (str | None): String prefix prepended to the value of "_to" + field in each edge document inserted. For example, prefix "foo" + prepended to "_to": "bar" will result in "_to": "foo/bar". + Applies only to edge collections. + overwrite (bool | None): If set to `True`, all existing documents are removed + prior to the import. Indexes are still preserved. + overwrite_collection_prefix (bool | None): Force the `fromPrefix` and + `toPrefix`, possibly replacing existing collection name prefixes. + on_duplicate (str | None): Action to take on unique key constraint violations + (for documents with "_key" fields). Allowed values are "error" (do + not import the new documents and count them as errors), "update" + (update the existing documents while preserving any fields missing + in the new ones), "replace" (replace the existing documents with + new ones), and "ignore" (do not import the new documents and count + them as ignored, as opposed to counting them as errors). Options + "update" and "replace" may fail on secondary unique key constraint + violations. + wait_for_sync (bool | None): Block until operation is synchronized to disk. + ignore_missing (bool | None): When importing JSON arrays of tabular data + (type parameter is omitted), the first line of the request body defines + the attribute keys and the subsequent lines the attribute values for each + document. Subsequent lines with a different number of elements than the + first line are not imported by default. You can enable this option to + import them anyway. For the missing elements, the document attributes + are omitted. Excess elements are ignored. + + Returns: + dict: Result of the import operation. + + Raises: + DocumentInsertError: If import fails. 
+ + References: + - `import-json-data-as-documents `__ + """ # noqa: E501 + params: Params = dict() + params["collection"] = self.name + if doc_type is not None: + params["type"] = doc_type + if complete is not None: + params["complete"] = complete + if details is not None: + params["details"] = details + if from_prefix is not None: + params["fromPrefix"] = from_prefix + if to_prefix is not None: + params["toPrefix"] = to_prefix + if overwrite is not None: + params["overwrite"] = overwrite + if overwrite_collection_prefix is not None: + params["overwriteCollectionPrefix"] = overwrite_collection_prefix + if on_duplicate is not None: + params["onDuplicate"] = on_duplicate + if wait_for_sync is not None: + params["waitForSync"] = wait_for_sync + if ignore_missing is not None: + params["ignoreMissing"] = ignore_missing + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise DocumentInsertError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + request = Request( + method=Method.POST, + endpoint="/_api/import", + data=documents, + params=params, + ) + + return await self._executor.execute(request, response_handler) + class StandardCollection(Collection[T, U, V]): """Standard collection API wrapper. diff --git a/docs/document.rst b/docs/document.rst index c0764e8..47619db 100644 --- a/docs/document.rst +++ b/docs/document.rst @@ -150,6 +150,39 @@ Standard documents are managed via collection API wrapper: # Delete one or more matching documents. await students.delete_match({"first": "Emma"}) +Importing documents in bulk is faster when using specialized methods. Suppose +our data is in a file containing JSON Lines (JSONL) format. Each line is expected +to be one JSON object. Example of a "students.jsonl" file: + +.. 
code-block:: json + + {"_key":"john","name":"John Smith","age":35} + {"_key":"katie","name":"Katie Foster","age":28} + +To import this file into the "students" collection, we can use the `import_bulk` API: + +.. code-block:: python + + from arangoasync import ArangoClient + from arangoasync.auth import Auth + import aiofiles + + async with ArangoClient(hosts="http://localhost:8529") as client: + auth = Auth(username="root", password="passwd") + + # Connect to "test" database as root user. + db = await client.db("test", auth=auth) + + # Get the API wrapper for "students" collection. + students = db.collection("students") + + # Read the JSONL file asynchronously. + async with aiofiles.open('students.jsonl', mode='r') as f: + documents = await f.read() + + # Import documents in bulk. + result = await students.import_bulk(documents, doc_type="documents") + You can manage documents via database API wrappers also, but only simple operations (i.e. get, insert, update, replace, delete) are supported and you must provide document IDs instead of keys: diff --git a/tests/test_collection.py b/tests/test_collection.py index fb8d7ba..2dc4c42 100644 --- a/tests/test_collection.py +++ b/tests/test_collection.py @@ -16,6 +16,7 @@ CollectionStatisticsError, CollectionTruncateError, DocumentCountError, + DocumentInsertError, IndexCreateError, IndexDeleteError, IndexGetError, @@ -263,3 +264,20 @@ async def test_collection_truncate_count(docs, doc_col, bad_col): await doc_col.truncate(wait_for_sync=True, compact=True) cnt = await doc_col.count() assert cnt == 0 + + +@pytest.mark.asyncio +async def test_collection_import_bulk(doc_col, bad_col, docs): + documents = "\n".join(doc_col.serializer.dumps(doc) for doc in docs) + + # Test errors + with pytest.raises(DocumentInsertError): + await bad_col.import_bulk(documents, doc_type="documents") + + # Insert documents in bulk + result = await doc_col.import_bulk(documents, doc_type="documents") + + # Verify the documents were inserted + count 
= await doc_col.count() + assert count == len(docs) + assert result["created"] == count From 8155b952b43b61c0e62bd4dc6bc6cc193f2dd557 Mon Sep 17 00:00:00 2001 From: Alex Petenchea Date: Fri, 8 Aug 2025 13:29:30 +0800 Subject: [PATCH 06/14] Tasks API (#62) * Adding support for /_api/tasks * Adding docs for /_api/tasks --- arangoasync/database.py | 146 ++++++++++++++++++++++++++++++++++++++ arangoasync/exceptions.py | 16 +++++ docs/index.rst | 1 + docs/task.rst | 51 +++++++++++++ tests/conftest.py | 13 ++++ tests/helpers.py | 18 +++++ tests/test_task.py | 79 +++++++++++++++++++++ 7 files changed, 324 insertions(+) create mode 100644 docs/task.rst create mode 100644 tests/test_task.py diff --git a/arangoasync/database.py b/arangoasync/database.py index b048b4f..f2b03ee 100644 --- a/arangoasync/database.py +++ b/arangoasync/database.py @@ -40,6 +40,10 @@ PermissionUpdateError, ServerStatusError, ServerVersionError, + TaskCreateError, + TaskDeleteError, + TaskGetError, + TaskListError, TransactionAbortError, TransactionCommitError, TransactionExecuteError, @@ -2193,6 +2197,148 @@ def response_handler(resp: Response) -> Json: return await self._executor.execute(request, response_handler) + async def tasks(self) -> Result[Jsons]: + """Fetches all existing tasks from the server. + + Returns: + list: List of currently active server tasks. + + Raises: + TaskListError: If the list cannot be retrieved. + + References: + - `list-all-tasks `__ + """ # noqa: E501 + request = Request(method=Method.GET, endpoint="/_api/tasks") + + def response_handler(resp: Response) -> Jsons: + if not resp.is_success: + raise TaskListError(resp, request) + result: Jsons = self.deserializer.loads_many(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def task(self, task_id: str) -> Result[Json]: + """Return the details of an active server task. + + Args: + task_id (str) -> Server task ID. + + Returns: + dict: Details of the server task. 
+ + Raises: + TaskGetError: If the task details cannot be retrieved. + + References: + - `get-a-task `__ + """ # noqa: E501 + request = Request(method=Method.GET, endpoint=f"/_api/tasks/{task_id}") + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise TaskGetError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def create_task( + self, + command: str, + task_id: Optional[str] = None, + name: Optional[str] = None, + offset: Optional[int] = None, + params: Optional[Json] = None, + period: Optional[int] = None, + ) -> Result[Json]: + """Create a new task. + + Args: + command (str): The JavaScript code to be executed. + task_id (str | None): Optional task ID. If not provided, the server will + generate a unique ID. + name (str | None): The name of the task. + offset (int | None): The offset in seconds after which the task should + start executing. + params (dict | None): Parameters to be passed to the command. + period (int | None): The number of seconds between the executions. + + Returns: + dict: Details of the created task. + + Raises: + TaskCreateError: If the task cannot be created. 
+ + References: + - `create-a-task `__ + - `create-a-task-with-id `__ + """ # noqa: E501 + data: Json = {"command": command} + if name is not None: + data["name"] = name + if offset is not None: + data["offset"] = offset + if params is not None: + data["params"] = params + if period is not None: + data["period"] = period + + if task_id is None: + request = Request( + method=Method.POST, + endpoint="/_api/tasks", + data=self.serializer.dumps(data), + ) + else: + request = Request( + method=Method.PUT, + endpoint=f"/_api/tasks/{task_id}", + data=self.serializer.dumps(data), + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise TaskCreateError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def delete_task( + self, + task_id: str, + ignore_missing: bool = False, + ) -> Result[bool]: + """Delete a server task. + + Args: + task_id (str): Task ID. + ignore_missing (bool): If `True`, do not raise an exception if the + task does not exist. + + Returns: + bool: `True` if the task was deleted successfully, `False` if the + task was not found and **ignore_missing** was set to `True`. + + Raises: + TaskDeleteError: If the operation fails. + + References: + - `delete-a-task `__ + """ # noqa: E501 + request = Request(method=Method.DELETE, endpoint=f"/_api/tasks/{task_id}") + + def response_handler(resp: Response) -> bool: + if resp.is_success: + return True + if resp.status_code == HTTP_NOT_FOUND and ignore_missing: + return False + raise TaskDeleteError(resp, request) + + return await self._executor.execute(request, response_handler) + class StandardDatabase(Database): """Standard database API wrapper. 
diff --git a/arangoasync/exceptions.py b/arangoasync/exceptions.py index 41644de..5ca333a 100644 --- a/arangoasync/exceptions.py +++ b/arangoasync/exceptions.py @@ -451,6 +451,22 @@ class SortValidationError(ArangoClientError): """Invalid sort parameters.""" +class TaskCreateError(ArangoServerError): + """Failed to create server task.""" + + +class TaskDeleteError(ArangoServerError): + """Failed to delete server task.""" + + +class TaskGetError(ArangoServerError): + """Failed to retrieve server task details.""" + + +class TaskListError(ArangoServerError): + """Failed to retrieve server tasks.""" + + class TransactionAbortError(ArangoServerError): """Failed to abort transaction.""" diff --git a/docs/index.rst b/docs/index.rst index 1b361fd..41eaeee 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -73,6 +73,7 @@ Contents compression serialization backup + task errors errno logging diff --git a/docs/task.rst b/docs/task.rst new file mode 100644 index 0000000..2490507 --- /dev/null +++ b/docs/task.rst @@ -0,0 +1,51 @@ +Tasks +----- + +ArangoDB can schedule user-defined Javascript snippets as one-time or periodic +(re-scheduled after each execution) tasks. Tasks are executed in the context of +the database they are defined in. + +**Example:** + +.. code-block:: python + + from arangoasync import ArangoClient + from arangoasync.auth import Auth + + # Initialize the client for ArangoDB. + async with ArangoClient(hosts="http://localhost:8529") as client: + auth = Auth(username="root", password="passwd") + + # Connect to "test" database as root user. + db = await client.db("test", auth=auth) + + # Create a new task which simply prints parameters. 
def generate_task_name():
    """Generate and return a random task name.

    Returns:
        str: Random task name.
    """
    # A fresh UUID hex suffix keeps concurrently created test tasks distinct.
    suffix = uuid4().hex
    return "test_task_" + suffix
+ + Returns: + str: Random task ID + """ + return f"test_task_id_{uuid4().hex}" diff --git a/tests/test_task.py b/tests/test_task.py new file mode 100644 index 0000000..4e1aee6 --- /dev/null +++ b/tests/test_task.py @@ -0,0 +1,79 @@ +import pytest + +from arangoasync.exceptions import ( + TaskCreateError, + TaskDeleteError, + TaskGetError, + TaskListError, +) +from tests.helpers import generate_task_id, generate_task_name + + +@pytest.mark.asyncio +async def test_task_management(sys_db, bad_db): + # This test intentionally uses the system database because cleaning up tasks is + # easier there. + + test_command = 'require("@arangodb").print(params);' + + # Test errors + with pytest.raises(TaskCreateError): + await bad_db.create_task(command=test_command) + with pytest.raises(TaskGetError): + await bad_db.task("non_existent_task_id") + with pytest.raises(TaskListError): + await bad_db.tasks() + with pytest.raises(TaskDeleteError): + await bad_db.delete_task("non_existent_task_id") + + # Create a task with a random ID + task_name = generate_task_name() + new_task = await sys_db.create_task( + name=task_name, + command=test_command, + params={"foo": 1, "bar": 2}, + offset=1, + ) + assert new_task["name"] == task_name + task_id = new_task["id"] + assert await sys_db.task(task_id) == new_task + + # Delete task + assert await sys_db.delete_task(task_id) is True + + # Create a task with a specific ID + task_name = generate_task_name() + task_id = generate_task_id() + new_task = await sys_db.create_task( + name=task_name, + command=test_command, + params={"foo": 1, "bar": 2}, + offset=1, + period=10, + task_id=task_id, + ) + assert new_task["name"] == task_name + assert new_task["id"] == task_id + + # Try to create a duplicate task + with pytest.raises(TaskCreateError): + await sys_db.create_task( + name=task_name, + command=test_command, + params={"foo": 1, "bar": 2}, + task_id=task_id, + ) + + # Test get missing task + with pytest.raises(TaskGetError): + await 
sys_db.task(generate_task_id()) + + # Test list tasks + tasks = await sys_db.tasks() + assert len(tasks) == 1 + + # Delete tasks + assert await sys_db.delete_task(task_id) is True + assert await sys_db.delete_task(task_id, ignore_missing=True) is False + with pytest.raises(TaskDeleteError): + await sys_db.delete_task(task_id) From a171df7b449cea79a2a2ce3d41ad0052261e0d6c Mon Sep 17 00:00:00 2001 From: Alex Petenchea Date: Fri, 8 Aug 2025 14:39:03 +0800 Subject: [PATCH 07/14] Adding security API (#63) --- arangoasync/database.py | 78 +++++++++++++++++++++++++++++++++++++++ arangoasync/exceptions.py | 12 ++++++ docs/certificates.rst | 22 +++++++++++ docs/migration.rst | 2 +- tests/test_client.py | 14 +++++++ 5 files changed, 127 insertions(+), 1 deletion(-) diff --git a/arangoasync/database.py b/arangoasync/database.py index f2b03ee..b338b56 100644 --- a/arangoasync/database.py +++ b/arangoasync/database.py @@ -38,7 +38,10 @@ PermissionListError, PermissionResetError, PermissionUpdateError, + ServerEncryptionError, ServerStatusError, + ServerTLSError, + ServerTLSReloadError, ServerVersionError, TaskCreateError, TaskDeleteError, @@ -2072,6 +2075,81 @@ def response_handler(resp: Response) -> Json: return await self._executor.execute(request, response_handler) + async def tls(self) -> Result[Json]: + """Return TLS data (keyfile, clientCA). + + This API requires authentication. + + Returns: + dict: dict containing the following components: + - keyfile: Information about the key file. + - clientCA: Information about the Certificate Authority (CA) for client certificate verification. + + Raises: + ServerTLSError: If the operation fails. 
+ + References: + - `get-the-tls-data `__ + """ # noqa: E501 + request = Request(method=Method.GET, endpoint="/_admin/server/tls") + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ServerTLSError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body)["result"] + return result + + return await self._executor.execute(request, response_handler) + + async def reload_tls(self) -> Result[Json]: + """Reload TLS data (keyfile, clientCA). + + This is a protected API and can only be executed with superuser rights. + + Returns: + dict: New TLS data. + + Raises: + ServerTLSReloadError: If the operation fails. + + References: + - `reload-the-tls-data `__ + """ # noqa: E501 + request = Request(method=Method.POST, endpoint="/_admin/server/tls") + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ServerTLSReloadError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body)["result"] + return result + + return await self._executor.execute(request, response_handler) + + async def encryption(self) -> Result[Json]: + """Rotate the user-supplied keys for encryption. + + This is a protected API and can only be executed with superuser rights. + This API is not available on Coordinator nodes. + + Returns: + dict: Encryption keys. + + Raises: + ServerEncryptionError: If the operation fails. + + References: + - `rotate-the-encryption-keys `__ + """ # noqa: E501 + request = Request(method=Method.POST, endpoint="/_admin/server/encryption") + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ServerEncryptionError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body)["result"] + return result + + return await self._executor.execute(request, response_handler) + async def list_transactions(self) -> Result[Jsons]: """List all currently running stream transactions. 
diff --git a/arangoasync/exceptions.py b/arangoasync/exceptions.py index 5ca333a..5e2844a 100644 --- a/arangoasync/exceptions.py +++ b/arangoasync/exceptions.py @@ -435,6 +435,10 @@ class SerializationError(ArangoClientError): """Failed to serialize the request.""" +class ServerEncryptionError(ArangoServerError): + """Failed to reload user-defined encryption keys.""" + + class ServerConnectionError(ArangoServerError): """Failed to connect to ArangoDB server.""" @@ -443,6 +447,14 @@ class ServerStatusError(ArangoServerError): """Failed to retrieve server status.""" +class ServerTLSError(ArangoServerError): + """Failed to retrieve TLS data.""" + + +class ServerTLSReloadError(ArangoServerError): + """Failed to reload TLS.""" + + class ServerVersionError(ArangoServerError): """Failed to retrieve server version.""" diff --git a/docs/certificates.rst b/docs/certificates.rst index c0665fa..ee49e13 100644 --- a/docs/certificates.rst +++ b/docs/certificates.rst @@ -108,3 +108,25 @@ Use a client certificate chain If you want to have fine-grained control over the HTTP connection, you should define your HTTP client as described in the :ref:`HTTP` section. + +Security features +================= + +See the `ArangoDB Manual`_ for more information on security features. + +**Example:** + +.. code-block:: python + + async with ArangoClient(hosts=url) as client: + db = await client.db( + sys_db_name, auth_method="superuser", token=token, verify=True + ) + + # Get TLS data + tls = await db.tls() + + # Reload TLS data + tls = await db.reload_tls() + +.. _ArangoDB Manual: https://docs.arangodb.com/stable/develop/http-api/security/ diff --git a/docs/migration.rst b/docs/migration.rst index f26e7d6..7c2427e 100644 --- a/docs/migration.rst +++ b/docs/migration.rst @@ -2,7 +2,7 @@ Coming from python-arango ------------------------- Generally, migrating from `python-arango`_ should be a smooth transition. 
For the most part, the API is similar, -but there are a few things to note._ +but there are a few things to note. Helpers ======= diff --git a/tests/test_client.py b/tests/test_client.py index 6210412..cb488a7 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -3,6 +3,7 @@ from arangoasync.auth import JwtToken from arangoasync.client import ArangoClient from arangoasync.compression import DefaultCompressionManager +from arangoasync.exceptions import ServerEncryptionError from arangoasync.http import DefaultHTTPClient from arangoasync.resolver import DefaultHostResolver, RoundRobinHostResolver from arangoasync.version import __version__ @@ -131,6 +132,19 @@ async def test_client_jwt_superuser_auth( await db.jwt_secrets() await db.reload_jwt_secrets() + # Get TLS data + tls = await db.tls() + assert isinstance(tls, dict) + + # Reload TLS data + tls = await db.reload_tls() + assert isinstance(tls, dict) + + # Rotate + with pytest.raises(ServerEncryptionError): + # Not allowed on coordinators + await db.encryption() + # token missing async with ArangoClient(hosts=url) as client: with pytest.raises(ValueError): From 7586d09f7232f6b0fbda3f7c513047abbcc4d074 Mon Sep 17 00:00:00 2001 From: Alex Petenchea Date: Sun, 10 Aug 2025 11:16:27 +0800 Subject: [PATCH 08/14] Cluster API (#64) * Adding cluster API * Adding rebalance operations * Adding cluster documentation --- arangoasync/cluster.py | 451 ++++++++++++++++++++++++++++++++++++++ arangoasync/database.py | 10 + arangoasync/exceptions.py | 28 +++ docs/cluster.rst | 53 +++++ docs/index.rst | 1 + docs/specs.rst | 3 + tests/test_cluster.py | 101 +++++++++ 7 files changed, 647 insertions(+) create mode 100644 arangoasync/cluster.py create mode 100644 docs/cluster.rst create mode 100644 tests/test_cluster.py diff --git a/arangoasync/cluster.py b/arangoasync/cluster.py new file mode 100644 index 0000000..ce33b92 --- /dev/null +++ b/arangoasync/cluster.py @@ -0,0 +1,451 @@ +__all__ = ["Cluster"] + +from typing 
import List, Optional, cast + +from arangoasync.exceptions import ( + ClusterEndpointsError, + ClusterHealthError, + ClusterMaintenanceModeError, + ClusterRebalanceError, + ClusterServerIDError, + ClusterServerRoleError, + ClusterStatisticsError, +) +from arangoasync.executor import ApiExecutor +from arangoasync.request import Method, Request +from arangoasync.response import Response +from arangoasync.result import Result +from arangoasync.serialization import Deserializer, Serializer +from arangoasync.typings import Json, Jsons, Params + + +class Cluster: + """Cluster-specific endpoints.""" + + def __init__(self, executor: ApiExecutor) -> None: + self._executor = executor + + @property + def serializer(self) -> Serializer[Json]: + """Return the serializer.""" + return self._executor.serializer + + @property + def deserializer(self) -> Deserializer[Json, Jsons]: + """Return the deserializer.""" + return self._executor.deserializer + + async def health(self) -> Result[Json]: + """Queries the health of the cluster. + + Returns: + dict: Health status of the cluster. + + Raises: + ClusterHealthError: If retrieval fails. + + References: + - `get-the-cluster-health `__ + """ # noqa: E501 + request = Request( + method=Method.GET, + endpoint="/_admin/cluster/health", + prefix_needed=False, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ClusterHealthError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return Response.format_body(result) + + return await self._executor.execute(request, response_handler) + + async def statistics(self, db_server: str) -> Result[Json]: + """Queries the statistics of the given DB-Server. + + Args: + db_server (str): The ID of the DB-Server. + + Returns: + dict: Statistics of the DB-Server. + + Raises: + ClusterStatisticsError: If retrieval fails. 
+ + References: + - `get-the-statistics-of-a-db-server `__ + """ # noqa: E501 + params: Params = {"DBserver": db_server} + + request = Request( + method=Method.GET, + endpoint="/_admin/cluster/statistics", + prefix_needed=False, + params=params, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ClusterStatisticsError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return Response.format_body(result) + + return await self._executor.execute(request, response_handler) + + async def endpoints(self) -> Result[List[str]]: + """Fetch all coordinator endpoints. + + Returns: + list: List of coordinator endpoints. + + Raises: + ClusterEndpointsError: If retrieval fails. + + References: + - `list-all-coordinator-endpoints `__ + """ # noqa: E501 + request = Request( + method=Method.GET, + endpoint="/_api/cluster/endpoints", + prefix_needed=False, + ) + + def response_handler(resp: Response) -> List[str]: + if not resp.is_success: + raise ClusterEndpointsError(resp, request) + body: Json = self.deserializer.loads(resp.raw_body) + return [item["endpoint"] for item in body["endpoints"]] + + return await self._executor.execute(request, response_handler) + + async def server_id(self) -> Result[str]: + """Get the ID of the current server. + + Returns: + str: Server ID. + + Raises: + ClusterServerIDError: If retrieval fails. + + References: + - `get-the-server-id `__ + """ # noqa: E501 + request = Request( + method=Method.GET, + endpoint="/_admin/server/id", + prefix_needed=False, + ) + + def response_handler(resp: Response) -> str: + if not resp.is_success: + raise ClusterServerIDError(resp, request) + return str(self.deserializer.loads(resp.raw_body)["id"]) + + return await self._executor.execute(request, response_handler) + + async def server_role(self) -> Result[str]: + """Get the role of the current server + + Returns: + str: Server role. 
Possible values: "SINGLE", "COORDINATOR", "PRIMARY", "SECONDARY", "AGENT", "UNDEFINED". + + Raises: + ClusterServerRoleError: If retrieval fails. + + References: + - `get-the-server-role `__ + """ # noqa: E501 + request = Request( + method=Method.GET, + endpoint="/_admin/server/role", + prefix_needed=False, + ) + + def response_handler(resp: Response) -> str: + if not resp.is_success: + raise ClusterServerRoleError(resp, request) + return str(self.deserializer.loads(resp.raw_body)["role"]) + + return await self._executor.execute(request, response_handler) + + async def toggle_maintenance_mode(self, mode: str) -> Result[Json]: + """Enable or disable the cluster supervision (agency) maintenance mode. + + Args: + mode (str): Maintenance mode. Allowed values are "on" or "off". + + Returns: + dict: Result of the operation. + + Raises: + ClusterMaintenanceModeError: If the toggle operation fails. + + References: + - `toggle-cluster-maintenance-mode `__ + """ # noqa: E501 + request = Request( + method=Method.PUT, + endpoint="/_admin/cluster/maintenance", + prefix_needed=False, + data=f'"{mode}"', + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ClusterMaintenanceModeError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return Response.format_body(result) + + return await self._executor.execute(request, response_handler) + + async def server_maintenance_mode(self, server_id: str) -> Result[Json]: + """Check whether the specified DB-Server is in maintenance mode and until when. + + Args: + server_id (str): Server ID. + + Returns: + dict: Maintenance status for the given server. + + Raises: + ClusterMaintenanceModeError: If retrieval fails. 
+ + References: + - `get-the-maintenance-status-of-a-db-server `__ + """ # noqa: E501 + request = Request( + method=Method.GET, + endpoint=f"/_admin/cluster/maintenance/{server_id}", + prefix_needed=False, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ClusterMaintenanceModeError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return Response.format_body(result) + + return await self._executor.execute(request, response_handler) + + async def toggle_server_maintenance_mode( + self, server_id: str, mode: str, timeout: Optional[int] = None + ) -> None: + """Enable or disable the maintenance mode for the given server. + + Args: + server_id (str): Server ID. + mode (str): Maintenance mode. Allowed values are "normal" and "maintenance". + timeout (int | None): After how many seconds the maintenance mode shall automatically end. + + Raises: + ClusterMaintenanceModeError: If the operation fails. + + References: + - `set-the-maintenance-status-of-a-db-server `__ + """ # noqa: E501 + data: Json = {"mode": mode} + if timeout is not None: + data["timeout"] = timeout + + request = Request( + method=Method.PUT, + endpoint=f"/_admin/cluster/maintenance/{server_id}", + prefix_needed=False, + data=self.serializer.dumps(data), + ) + + def response_handler(resp: Response) -> None: + if not resp.is_success: + raise ClusterMaintenanceModeError(resp, request) + + await self._executor.execute(request, response_handler) + + async def calculate_imbalance(self) -> Result[Json]: + """Computes the current cluster imbalance and returns the result. + + Returns: + dict: Cluster imbalance information. + + Raises: + ClusterRebalanceError: If retrieval fails. 
+ + References: + - `get-the-current-cluster-imbalance `__ + """ # noqa: E501 + request = Request(method=Method.GET, endpoint="/_admin/cluster/rebalance") + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ClusterRebalanceError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return Response.format_body(result) + + return await self._executor.execute(request, response_handler) + + async def calculate_rebalance_plan( + self, + databases_excluded: Optional[List[str]] = None, + exclude_system_collections: Optional[bool] = None, + leader_changes: Optional[bool] = None, + maximum_number_of_moves: Optional[int] = None, + move_followers: Optional[bool] = None, + move_leaders: Optional[bool] = None, + pi_factor: Optional[float] = None, + version: int = 1, + ) -> Result[Json]: + """Compute a set of move shard operations to improve balance. + + Args: + databases_excluded (list | None): List of database names to be excluded from + the analysis. + exclude_system_collections (bool | None): Ignore system collections in the + rebalance plan. + leader_changes (bool | None): Allow leader changes without moving data. + maximum_number_of_moves (int | None): Maximum number of moves to be computed. + move_followers (bool | None): Allow moving shard followers. + move_leaders (bool | None): Allow moving shard leaders. + pi_factor (float | None): A weighting factor that should remain untouched. + version (int): Must be set to 1. + + Returns: + dict: Cluster rebalance plan. + + Raises: + ClusterRebalanceError: If retrieval fails. 
+ + References: + - `compute-a-set-of-move-shard-operations-to-improve-balance `__ + """ # noqa: E501 + data: Json = dict(version=version) + if databases_excluded is not None: + data["databasesExcluded"] = databases_excluded + if exclude_system_collections is not None: + data["excludeSystemCollections"] = exclude_system_collections + if leader_changes is not None: + data["leaderChanges"] = leader_changes + if maximum_number_of_moves is not None: + data["maximumNumberOfMoves"] = maximum_number_of_moves + if move_followers is not None: + data["moveFollowers"] = move_followers + if move_leaders is not None: + data["moveLeaders"] = move_leaders + if pi_factor is not None: + data["piFactor"] = pi_factor + + request = Request( + method=Method.POST, + endpoint="/_admin/cluster/rebalance", + prefix_needed=False, + data=self.serializer.dumps(data), + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ClusterRebalanceError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return cast(Json, result["result"]) + + return await self._executor.execute(request, response_handler) + + async def rebalance( + self, + databases_excluded: Optional[List[str]] = None, + exclude_system_collections: Optional[bool] = None, + leader_changes: Optional[bool] = None, + maximum_number_of_moves: Optional[int] = None, + move_followers: Optional[bool] = None, + move_leaders: Optional[bool] = None, + pi_factor: Optional[float] = None, + version: int = 1, + ) -> Result[Json]: + """Compute and execute a set of move shard operations to improve balance. + + Args: + databases_excluded (list | None): List of database names to be excluded from + the analysis. + exclude_system_collections (bool | None): Ignore system collections in the + rebalance plan. + leader_changes (bool | None): Allow leader changes without moving data. + maximum_number_of_moves (int | None): Maximum number of moves to be computed. 
+ move_followers (bool | None): Allow moving shard followers. + move_leaders (bool | None): Allow moving shard leaders. + pi_factor (float | None): A weighting factor that should remain untouched. + version (int): Must be set to 1. + + Returns: + dict: Cluster rebalance plan. + + Raises: + ClusterRebalanceError: If retrieval fails. + + References: + - `compute-and-execute-a-set-of-move-shard-operations-to-improve-balance `__ + """ # noqa: E501 + data: Json = dict(version=version) + if databases_excluded is not None: + data["databasesExcluded"] = databases_excluded + if exclude_system_collections is not None: + data["excludeSystemCollections"] = exclude_system_collections + if leader_changes is not None: + data["leaderChanges"] = leader_changes + if maximum_number_of_moves is not None: + data["maximumNumberOfMoves"] = maximum_number_of_moves + if move_followers is not None: + data["moveFollowers"] = move_followers + if move_leaders is not None: + data["moveLeaders"] = move_leaders + if pi_factor is not None: + data["piFactor"] = pi_factor + + request = Request( + method=Method.PUT, + endpoint="/_admin/cluster/rebalance", + prefix_needed=False, + data=self.serializer.dumps(data), + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ClusterRebalanceError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return cast(Json, result["result"]) + + return await self._executor.execute(request, response_handler) + + async def execute_rebalance_plan( + self, + moves: List[Json], + version: int = 1, + ) -> Result[int]: + """Execute a set of move shard operations. + + Args: + moves (list): List of move shard operations to be executed. + version (int): Must be set to 1. + + Returns: + int: Indicates whether the methods have been accepted and scheduled for execution. + + Raises: + ClusterRebalanceError: If the execution fails. 
+ + References: + - `execute-a-set-of-move-shard-operations `__ + """ # noqa: E501 + data: Json = dict(version=version, moves=moves) + + request = Request( + method=Method.POST, + endpoint="/_admin/cluster/rebalance/execute", + data=self.serializer.dumps(data), + prefix_needed=False, + ) + + def response_handler(resp: Response) -> int: + if not resp.is_success: + raise ClusterRebalanceError(resp, request) + result: int = self.deserializer.loads(resp.raw_body)["code"] + return result + + return await self._executor.execute(request, response_handler) diff --git a/arangoasync/database.py b/arangoasync/database.py index b338b56..d0ddbbb 100644 --- a/arangoasync/database.py +++ b/arangoasync/database.py @@ -11,6 +11,7 @@ from arangoasync.aql import AQL from arangoasync.backup import Backup +from arangoasync.cluster import Cluster from arangoasync.collection import Collection, StandardCollection from arangoasync.connection import Connection from arangoasync.errno import HTTP_FORBIDDEN, HTTP_NOT_FOUND @@ -189,6 +190,15 @@ def backup(self) -> Backup: """ return Backup(self._executor) + @property + def cluster(self) -> Cluster: + """Return Cluster API wrapper. + + Returns: + arangoasync.cluster.Cluster: Cluster API wrapper. + """ + return Cluster(self._executor) + async def properties(self) -> Result[DatabaseProperties]: """Return database properties. 
diff --git a/arangoasync/exceptions.py b/arangoasync/exceptions.py index 5e2844a..bfd30d7 100644 --- a/arangoasync/exceptions.py +++ b/arangoasync/exceptions.py @@ -271,6 +271,34 @@ class ClientConnectionError(ArangoClientError): """The request was unable to reach the server.""" +class ClusterEndpointsError(ArangoServerError): + """Failed to retrieve coordinator endpoints.""" + + +class ClusterHealthError(ArangoServerError): + """Failed to retrieve cluster health.""" + + +class ClusterMaintenanceModeError(ArangoServerError): + """Failed to enable/disable cluster supervision maintenance mode.""" + + +class ClusterRebalanceError(ArangoServerError): + """Failed to execute cluster rebalancing operation.""" + + +class ClusterServerRoleError(ArangoServerError): + """Failed to retrieve server role in a cluster.""" + + +class ClusterServerIDError(ArangoServerError): + """Failed to retrieve server ID.""" + + +class ClusterStatisticsError(ArangoServerError): + """Failed to retrieve DB-Server statistics.""" + + class CursorCloseError(ArangoServerError): """Failed to delete the cursor result from server.""" diff --git a/docs/cluster.rst b/docs/cluster.rst new file mode 100644 index 0000000..c5e58aa --- /dev/null +++ b/docs/cluster.rst @@ -0,0 +1,53 @@ +Clusters +-------- + +The cluster-specific API lets you get information about individual +cluster nodes and the cluster as a whole, as well as monitor and +administrate cluster deployments. For more information on the design +and architecture, refer to `ArangoDB Manual`_. + +.. _ArangoDB Manual: https://docs.arangodb.com + +.. code-block:: python + + from arangoasync import ArangoClient + from arangoasync.auth import Auth + + # Initialize the client for ArangoDB. + async with ArangoClient(hosts="http://localhost:8529") as client: + auth = Auth(username="root", password="passwd") + + # Connect to "_system" database as root user. 
+ db = await client.db("_system", auth=auth) + cluster = db.cluster + + # Cluster health + health = await cluster.health() + + # DB-Server statistics + db_server = "PRMR-2716c9d0-4b22-4c66-ba3d-f9cd3143e52b" + stats = await cluster.statistics(db_server) + + # Cluster endpoints + endpoints = await cluster.endpoints() + + # Cluster server ID and role + server_id = await cluster.server_id() + server_role = await cluster.server_role() + + # Maintenance mode + await cluster.toggle_maintenance_mode("on") + await cluster.toggle_maintenance_mode("off") + await cluster.toggle_server_maintenance_mode( + db_server, "maintenance", timeout=30 + ) + status = await cluster.server_maintenance_mode(db_server) + await cluster.toggle_server_maintenance_mode(db_server, "normal") + + # Rebalance + result = await cluster.calculate_imbalance() + result = await cluster.calculate_rebalance_plan() + result = await cluster.execute_rebalance_plan(moves=[]) + result = await cluster.rebalance() + +See :class:`arangoasync.cluster.Cluster` for API specification. diff --git a/docs/index.rst b/docs/index.rst index 41eaeee..65eefd3 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -46,6 +46,7 @@ Contents transaction view analyzer + cluster **API Executions** diff --git a/docs/specs.rst b/docs/specs.rst index a2b982f..763af9c 100644 --- a/docs/specs.rst +++ b/docs/specs.rst @@ -31,6 +31,9 @@ python-arango-async. .. automodule:: arangoasync.backup :members: +.. automodule:: arangoasync.cluster + :members: + .. 
automodule:: arangoasync.compression :members: diff --git a/tests/test_cluster.py b/tests/test_cluster.py new file mode 100644 index 0000000..d5b0b75 --- /dev/null +++ b/tests/test_cluster.py @@ -0,0 +1,101 @@ +import pytest +from packaging import version + +from arangoasync.client import ArangoClient +from arangoasync.exceptions import ( + ClusterEndpointsError, + ClusterHealthError, + ClusterMaintenanceModeError, + ClusterRebalanceError, + ClusterServerIDError, + ClusterServerRoleError, + ClusterStatisticsError, +) + + +@pytest.mark.asyncio +async def test_cluster( + url, sys_db_name, bad_db, token, enterprise, cluster, db_version +): + if not cluster: + pytest.skip("Cluster API is only tested in cluster setups") + if not enterprise or db_version < version.parse("3.12.0"): + pytest.skip( + "For simplicity, the cluster API is only tested in the latest versions" + ) + + # Test errors + with pytest.raises(ClusterHealthError): + await bad_db.cluster.health() + with pytest.raises(ClusterStatisticsError): + await bad_db.cluster.statistics("foo") + with pytest.raises(ClusterEndpointsError): + await bad_db.cluster.endpoints() + with pytest.raises(ClusterServerIDError): + await bad_db.cluster.server_id() + with pytest.raises(ClusterServerRoleError): + await bad_db.cluster.server_role() + with pytest.raises(ClusterMaintenanceModeError): + await bad_db.cluster.toggle_maintenance_mode("on") + with pytest.raises(ClusterMaintenanceModeError): + await bad_db.cluster.toggle_server_maintenance_mode("PRMR0001", "normal") + with pytest.raises(ClusterMaintenanceModeError): + await bad_db.cluster.server_maintenance_mode("PRMR0001") + with pytest.raises(ClusterRebalanceError): + await bad_db.cluster.calculate_imbalance() + with pytest.raises(ClusterRebalanceError): + await bad_db.cluster.rebalance() + with pytest.raises(ClusterRebalanceError): + await bad_db.cluster.calculate_rebalance_plan() + with pytest.raises(ClusterRebalanceError): + await 
bad_db.cluster.execute_rebalance_plan(moves=[]) + + async with ArangoClient(hosts=url) as client: + db = await client.db( + sys_db_name, auth_method="superuser", token=token, verify=True + ) + cluster = db.cluster + + # Cluster health + health = await cluster.health() + assert "Health" in health + + # DB-Server statistics + db_server = None + for server in health["Health"]: + if server.startswith("PRMR"): + db_server = server + break + assert db_server is not None, f"No DB server found in {health}" + stats = await cluster.statistics(db_server) + assert "enabled" in stats + + # Cluster endpoints + endpoints = await cluster.endpoints() + assert len(endpoints) > 0 + + # Cluster server ID and role + server_id = await cluster.server_id() + assert isinstance(server_id, str) + server_role = await cluster.server_role() + assert isinstance(server_role, str) + + # Maintenance mode + await cluster.toggle_maintenance_mode("on") + await cluster.toggle_maintenance_mode("off") + await cluster.toggle_server_maintenance_mode( + db_server, "maintenance", timeout=30 + ) + status = await cluster.server_maintenance_mode(db_server) + assert isinstance(status, dict) + await cluster.toggle_server_maintenance_mode(db_server, "normal") + + # Rebalance + result = await cluster.calculate_imbalance() + assert isinstance(result, dict) + result = await cluster.calculate_rebalance_plan() + assert isinstance(result, dict) + result = await cluster.execute_rebalance_plan(moves=[]) + assert result == 200 + result = await cluster.rebalance() + assert isinstance(result, dict) From 114b45f1d6df144f515ea64c45f0fada3b4fac8b Mon Sep 17 00:00:00 2001 From: Alex Petenchea Date: Tue, 12 Aug 2025 13:57:08 +0800 Subject: [PATCH 09/14] Foxx API (#65) * Adding foxx API * Finishing foxx API * Foxx documentation --- arangoasync/database.py | 10 + arangoasync/exceptions.py | 84 ++++ arangoasync/foxx.py | 829 ++++++++++++++++++++++++++++++++++++++ arangoasync/request.py | 10 +- docs/document.rst | 2 +- docs/foxx.rst 
| 147 +++++++ docs/index.rst | 1 + pyproject.toml | 1 + tests/helpers.py | 9 + tests/static/service.zip | Bin 0 -> 2963 bytes tests/test_foxx.py | 245 +++++++++++ 11 files changed, 1332 insertions(+), 6 deletions(-) create mode 100644 arangoasync/foxx.py create mode 100644 docs/foxx.rst create mode 100644 tests/static/service.zip create mode 100644 tests/test_foxx.py diff --git a/arangoasync/database.py b/arangoasync/database.py index d0ddbbb..be057c4 100644 --- a/arangoasync/database.py +++ b/arangoasync/database.py @@ -74,6 +74,7 @@ DefaultApiExecutor, TransactionApiExecutor, ) +from arangoasync.foxx import Foxx from arangoasync.graph import Graph from arangoasync.request import Method, Request from arangoasync.response import Response @@ -199,6 +200,15 @@ def cluster(self) -> Cluster: """ return Cluster(self._executor) + @property + def foxx(self) -> Foxx: + """Return Foxx API wrapper. + + Returns: + arangoasync.foxx.Foxx: Foxx API wrapper. + """ + return Foxx(self._executor) + async def properties(self) -> Result[DatabaseProperties]: """Return database properties. 
diff --git a/arangoasync/exceptions.py b/arangoasync/exceptions.py index bfd30d7..99340dd 100644 --- a/arangoasync/exceptions.py +++ b/arangoasync/exceptions.py @@ -395,6 +395,90 @@ class EdgeListError(ArangoServerError): """Failed to retrieve edges coming in and out of a vertex.""" +class FoxxConfigGetError(ArangoServerError): + """Failed to retrieve Foxx service configuration.""" + + +class FoxxConfigReplaceError(ArangoServerError): + """Failed to replace Foxx service configuration.""" + + +class FoxxConfigUpdateError(ArangoServerError): + """Failed to update Foxx service configuration.""" + + +class FoxxCommitError(ArangoServerError): + """Failed to commit local Foxx service state.""" + + +class FoxxDependencyGetError(ArangoServerError): + """Failed to retrieve Foxx service dependencies.""" + + +class FoxxDependencyReplaceError(ArangoServerError): + """Failed to replace Foxx service dependencies.""" + + +class FoxxDependencyUpdateError(ArangoServerError): + """Failed to update Foxx service dependencies.""" + + +class FoxxScriptListError(ArangoServerError): + """Failed to retrieve Foxx service scripts.""" + + +class FoxxDevModeEnableError(ArangoServerError): + """Failed to enable development mode for Foxx service.""" + + +class FoxxDevModeDisableError(ArangoServerError): + """Failed to disable development mode for Foxx service.""" + + +class FoxxDownloadError(ArangoServerError): + """Failed to download Foxx service bundle.""" + + +class FoxxReadmeGetError(ArangoServerError): + """Failed to retrieve Foxx service readme.""" + + +class FoxxScriptRunError(ArangoServerError): + """Failed to run Foxx service script.""" + + +class FoxxServiceCreateError(ArangoServerError): + """Failed to create Foxx service.""" + + +class FoxxServiceDeleteError(ArangoServerError): + """Failed to delete Foxx services.""" + + +class FoxxServiceGetError(ArangoServerError): + """Failed to retrieve Foxx service metadata.""" + + +class FoxxServiceListError(ArangoServerError): + """Failed to 
retrieve Foxx services.""" + + +class FoxxServiceReplaceError(ArangoServerError): + """Failed to replace Foxx service.""" + + +class FoxxServiceUpdateError(ArangoServerError): + """Failed to update Foxx service.""" + + +class FoxxSwaggerGetError(ArangoServerError): + """Failed to retrieve Foxx service swagger.""" + + +class FoxxTestRunError(ArangoServerError): + """Failed to run Foxx service tests.""" + + class GraphCreateError(ArangoServerError): """Failed to create the graph.""" diff --git a/arangoasync/foxx.py b/arangoasync/foxx.py new file mode 100644 index 0000000..b74d933 --- /dev/null +++ b/arangoasync/foxx.py @@ -0,0 +1,829 @@ +__all__ = ["Foxx"] + +from typing import Any, Optional + +from arangoasync.exceptions import ( + FoxxCommitError, + FoxxConfigGetError, + FoxxConfigReplaceError, + FoxxConfigUpdateError, + FoxxDependencyGetError, + FoxxDependencyReplaceError, + FoxxDependencyUpdateError, + FoxxDevModeDisableError, + FoxxDevModeEnableError, + FoxxDownloadError, + FoxxReadmeGetError, + FoxxScriptListError, + FoxxScriptRunError, + FoxxServiceCreateError, + FoxxServiceDeleteError, + FoxxServiceGetError, + FoxxServiceListError, + FoxxServiceReplaceError, + FoxxServiceUpdateError, + FoxxSwaggerGetError, + FoxxTestRunError, +) +from arangoasync.executor import ApiExecutor +from arangoasync.request import Method, Request +from arangoasync.response import Response +from arangoasync.result import Result +from arangoasync.serialization import Deserializer, Serializer +from arangoasync.typings import Json, Jsons, Params, RequestHeaders + + +class Foxx: + """Foxx API wrapper.""" + + def __init__(self, executor: ApiExecutor) -> None: + self._executor = executor + + def __repr__(self) -> str: + return f"" + + @property + def serializer(self) -> Serializer[Json]: + """Return the serializer.""" + return self._executor.serializer + + @property + def deserializer(self) -> Deserializer[Json, Jsons]: + """Return the deserializer.""" + return self._executor.deserializer + 
+ async def services(self, exclude_system: Optional[bool] = False) -> Result[Jsons]: + """List installed services. + + Args: + exclude_system (bool | None): Exclude system services. + + Returns: + list: List of installed services. + + Raises: + FoxxServiceListError: If retrieval fails. + + References: + - `list-the-installed-services `__ + """ # noqa: E501 + params: Params = {} + if exclude_system is not None: + params["excludeSystem"] = exclude_system + + request = Request( + method=Method.GET, + endpoint="/_api/foxx", + params=params, + ) + + def response_handler(resp: Response) -> Jsons: + if not resp.is_success: + raise FoxxServiceListError(resp, request) + result: Jsons = self.deserializer.loads_many(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def service(self, mount: str) -> Result[Json]: + """Return service metadata. + + Args: + mount (str): Service mount path (e.g "/_admin/aardvark"). + + Returns: + dict: Service metadata. + + Raises: + FoxxServiceGetError: If retrieval fails. + + References: + - `get-the-service-description `__ + """ # noqa: E501 + params: Params = {"mount": mount} + request = Request( + method=Method.GET, + endpoint="/_api/foxx/service", + params=params, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise FoxxServiceGetError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def create_service( + self, + mount: str, + service: Any, + headers: Optional[RequestHeaders] = None, + development: Optional[bool] = None, + setup: Optional[bool] = None, + legacy: Optional[bool] = None, + ) -> Result[Json]: + """Installs the given new service at the given mount path. + + Args: + mount (str): Mount path the service should be installed at. + service (Any): Service payload. Can be a JSON string, a file-like object, or a + multipart form. 
+ headers (dict | None): Request headers. + development (bool | None): Whether to install the service in development mode. + setup (bool | None): Whether to run the service setup script. + legacy (bool | None): Whether to install in legacy mode. + + Returns: + dict: Service metadata. + + Raises: + FoxxServiceCreateError: If installation fails. + + References: + - `install-a-new-service-mode `__ + """ # noqa: E501 + params: Params = dict() + params["mount"] = mount + if development is not None: + params["development"] = development + if setup is not None: + params["setup"] = setup + if legacy is not None: + params["legacy"] = legacy + + if isinstance(service, dict): + data = self.serializer.dumps(service) + else: + data = service + + request = Request( + method=Method.POST, + endpoint="/_api/foxx", + params=params, + data=data, + headers=headers, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise FoxxServiceCreateError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def delete_service( + self, + mount: str, + teardown: Optional[bool] = None, + ) -> None: + """Removes the service at the given mount path from the database and file system. + + Args: + mount (str): Mount path of the service to uninstall. + teardown (bool | None): Whether to run the teardown script. + + Raises: + FoxxServiceDeleteError: If operations fails. 
+ + References: + - `uninstall-a-service `__ + """ # noqa: E501 + params: Params = dict() + params["mount"] = mount + if teardown is not None: + params["teardown"] = teardown + + request = Request( + method=Method.DELETE, + endpoint="/_api/foxx/service", + params=params, + ) + + def response_handler(resp: Response) -> None: + if not resp.is_success: + raise FoxxServiceDeleteError(resp, request) + + await self._executor.execute(request, response_handler) + + async def replace_service( + self, + mount: str, + service: Any, + headers: Optional[RequestHeaders] = None, + teardown: Optional[bool] = None, + setup: Optional[bool] = None, + legacy: Optional[bool] = None, + force: Optional[bool] = None, + ) -> Result[Json]: + """Replace an existing Foxx service at the given mount path. + + Args: + mount (str): Mount path of the service to replace. + service (Any): Service payload (JSON string, file-like object, or multipart form). + headers (dict | None): Optional request headers. + teardown (bool | None): Whether to run the teardown script. + setup (bool | None): Whether to run the setup script. + legacy (bool | None): Whether to install in legacy mode. + force (bool | None): Set to `True` to force service install even if no service is installed under given mount. + + Returns: + dict: Service metadata. + + Raises: + FoxxServiceReplaceError: If replacement fails. 
+ + References: + - `replace-a-service `__ + """ # noqa: E501 + params: Params = dict() + params["mount"] = mount + if teardown is not None: + params["teardown"] = teardown + if setup is not None: + params["setup"] = setup + if legacy is not None: + params["legacy"] = legacy + if force is not None: + params["force"] = force + + if isinstance(service, dict): + data = self.serializer.dumps(service) + else: + data = service + + request = Request( + method=Method.PUT, + endpoint="/_api/foxx/service", + params=params, + data=data, + headers=headers, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise FoxxServiceReplaceError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def update_service( + self, + mount: str, + service: Any, + headers: Optional[RequestHeaders] = None, + teardown: Optional[bool] = None, + setup: Optional[bool] = None, + legacy: Optional[bool] = None, + force: Optional[bool] = None, + ) -> Result[Json]: + """Upgrade a Foxx service at the given mount path. + + Args: + mount (str): Mount path of the service to upgrade. + service (Any): Service payload (JSON string, file-like object, or multipart form). + headers (dict | None): Optional request headers. + teardown (bool | None): Whether to run the teardown script. + setup (bool | None): Whether to run the setup script. + legacy (bool | None): Whether to upgrade in legacy mode. + force (bool | None): Set to `True` to force service install even if no service is installed under given mount. + + Returns: + dict: Service metadata. + + Raises: + FoxxServiceUpdateError: If upgrade fails. 
+ + References: + - `upgrade-a-service `__ + """ # noqa: E501 + params: Params = dict() + params["mount"] = mount + if teardown is not None: + params["teardown"] = teardown + if setup is not None: + params["setup"] = setup + if legacy is not None: + params["legacy"] = legacy + if force is not None: + params["force"] = force + + if isinstance(service, dict): + data = self.serializer.dumps(service) + else: + data = service + + request = Request( + method=Method.PATCH, + endpoint="/_api/foxx/service", + params=params, + data=data, + headers=headers, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise FoxxServiceUpdateError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def config(self, mount: str) -> Result[Json]: + """Return service configuration. + + Args: + mount (str): Service mount path. + + Returns: + dict: Service configuration. + + Raises: + FoxxConfigGetError: If retrieval fails. + + References: + - `get-the-configuration-options `__ + """ # noqa: E501 + request = Request( + method=Method.GET, + endpoint="/_api/foxx/configuration", + params={"mount": mount}, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise FoxxConfigGetError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def update_config(self, mount: str, options: Json) -> Result[Json]: + """Update service configuration. + + Args: + mount (str): Service mount path. + options (dict): Configuration values. Omitted options are ignored. + + Returns: + dict: Updated configuration values. + + Raises: + FoxxConfigUpdateError: If update fails. 
+ + References: + - `update-the-configuration-options `__ + """ # noqa: E501 + request = Request( + method=Method.PATCH, + endpoint="/_api/foxx/configuration", + params={"mount": mount}, + data=self.serializer.dumps(options), + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise FoxxConfigUpdateError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def replace_config(self, mount: str, options: Json) -> Result[Json]: + """Replace service configuration. + + Args: + mount (str): Service mount path. + options (dict): Configuration values. Omitted options are reset to their + default values or marked as un-configured. + + Returns: + dict: Replaced configuration values. + + Raises: + FoxxConfigReplaceError: If replace fails. + + References: + - `replace-the-configuration-options `__ + """ # noqa: E501 + request = Request( + method=Method.PUT, + endpoint="/_api/foxx/configuration", + params={"mount": mount}, + data=self.serializer.dumps(options), + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise FoxxConfigReplaceError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def dependencies(self, mount: str) -> Result[Json]: + """Return service dependencies. + + Args: + mount (str): Service mount path. + + Returns: + dict: Service dependencies settings. + + Raises: + FoxxDependencyGetError: If retrieval fails. 
+ + References: + - `get-the-dependency-options `__ + """ # noqa: E501 + request = Request( + method=Method.GET, + endpoint="/_api/foxx/dependencies", + params={"mount": mount}, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise FoxxDependencyGetError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def update_dependencies(self, mount: str, options: Json) -> Result[Json]: + """Update service dependencies. + + Args: + mount (str): Service mount path. + options (dict): Dependencies settings. Omitted ones are ignored. + + Returns: + dict: Updated dependency settings. + + Raises: + FoxxDependencyUpdateError: If update fails. + + References: + - `update-the-dependency-options `__ + """ # noqa: E501 + request = Request( + method=Method.PATCH, + endpoint="/_api/foxx/dependencies", + params={"mount": mount}, + data=self.serializer.dumps(options), + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise FoxxDependencyUpdateError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def replace_dependencies(self, mount: str, options: Json) -> Result[Json]: + """Replace service dependencies. + + Args: + mount (str): Service mount path. + options (dict): Dependencies settings. Omitted ones are disabled. + + Returns: + dict: Replaced dependency settings. + + Raises: + FoxxDependencyReplaceError: If replace fails. 
+ + References: + - `replace-the-dependency-options `__ + """ # noqa: E501 + request = Request( + method=Method.PUT, + endpoint="/_api/foxx/dependencies", + params={"mount": mount}, + data=self.serializer.dumps(options), + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise FoxxDependencyReplaceError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def scripts(self, mount: str) -> Result[Json]: + """List service scripts. + + Args: + mount (str): Service mount path. + + Returns: + dict: Service scripts. + + Raises: + FoxxScriptListError: If retrieval fails. + + References: + - `list-the-service-scripts `__ + """ # noqa: E501 + request = Request( + method=Method.GET, + endpoint="/_api/foxx/scripts", + params={"mount": mount}, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise FoxxScriptListError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def run_script( + self, mount: str, name: str, arg: Optional[Json] = None + ) -> Result[Any]: + """Run a service script. + + Args: + mount (str): Service mount path. + name (str): Script name. + arg (dict | None): Arbitrary value passed into the script as first argument. + + Returns: + Any: Returns the exports of the script, if any. + + Raises: + FoxxScriptRunError: If script fails. 
+ + References: + - `run-a-service-script `__ + """ # noqa: E501 + request = Request( + method=Method.POST, + endpoint=f"/_api/foxx/scripts/{name}", + params={"mount": mount}, + data=self.serializer.dumps(arg) if arg is not None else None, + ) + + def response_handler(resp: Response) -> Any: + if not resp.is_success: + raise FoxxScriptRunError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def run_tests( + self, + mount: str, + reporter: Optional[str] = None, + idiomatic: Optional[bool] = None, + filter: Optional[str] = None, + output_format: Optional[str] = None, + ) -> Result[str]: + """Run service tests. + + Args: + mount (str): Service mount path. + reporter (str | None): Test reporter. Allowed values are "default" (simple + list of test cases), "suite" (object of test cases nested in + suites), "stream" (raw stream of test results), "xunit" (XUnit or + JUnit compatible structure), or "tap" (raw TAP compatible stream). + idiomatic (bool | None): Use matching format for the reporter, regardless of + the value of parameter **output_format**. + filter (str | None): Only run tests whose full name (test suite and + test case) matches the given string. + output_format (str | None): Used to further control format. Allowed values + are "x-ldjson", "xml" and "text". When using "stream" reporter, + setting this to "x-ldjson" returns newline-delimited JSON stream. + When using "tap" reporter, setting this to "text" returns plain + text TAP report. When using "xunit" reporter, settings this to + "xml" returns an XML instead of JSONML. + + Returns: + str: Reporter output (e.g. raw JSON string, XML, plain text). + + Raises: + FoxxTestRunError: If test fails. 
+ + References: + - `run-the-service-tests `__ + """ # noqa: E501 + params: Params = dict() + params["mount"] = mount + if reporter is not None: + params["reporter"] = reporter + if idiomatic is not None: + params["idiomatic"] = idiomatic + if filter is not None: + params["filter"] = filter + + headers: RequestHeaders = {} + if output_format == "x-ldjson": + headers["accept"] = "application/x-ldjson" + elif output_format == "xml": + headers["accept"] = "application/xml" + elif output_format == "text": + headers["accept"] = "text/plain" + + request = Request( + method=Method.POST, + endpoint="/_api/foxx/tests", + params=params, + headers=headers, + ) + + def response_handler(resp: Response) -> str: + if not resp.is_success: + raise FoxxTestRunError(resp, request) + return resp.raw_body.decode("utf-8") + + return await self._executor.execute(request, response_handler) + + async def enable_development(self, mount: str) -> Result[Json]: + """Puts the service into development mode. + + While the service is running in development mode, it is reloaded from + the file system, and its setup script (if any) is re-executed every + time the service handles a request. + + In a cluster with multiple coordinators, changes to the filesystem on + one coordinator is not reflected across other coordinators. + + Args: + mount (str): Service mount path. + + Returns: + dict: Service metadata. + + Raises: + FoxxDevModeEnableError: If the operation fails. 
+ + References: + - `enable-the-development-mode `__ + """ # noqa: E501 + request = Request( + method=Method.POST, + endpoint="/_api/foxx/development", + params={"mount": mount}, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise FoxxDevModeEnableError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def disable_development(self, mount: str) -> Result[Json]: + """Puts the service into production mode. + + In a cluster with multiple coordinators, the services on all other + coordinators are replaced with the version on the calling coordinator. + + Args: + mount (str): Service mount path. + + Returns: + dict: Service metadata. + + Raises: + FoxxDevModeDisableError: If the operation fails. + + References: + - `disable-the-development-mode `__ + """ # noqa: E501 + request = Request( + method=Method.DELETE, + endpoint="/_api/foxx/development", + params={"mount": mount}, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise FoxxDevModeDisableError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def readme(self, mount: str) -> Result[str]: + """Return the service readme. + + Args: + mount (str): Service mount path. + + Returns: + str: Service readme content. + + Raises: + FoxxReadmeGetError: If retrieval fails. 
+ + References: + - `get-the-service-readme `__ + """ # noqa: E501 + request = Request( + method=Method.GET, + endpoint="/_api/foxx/readme", + params={"mount": mount}, + ) + + def response_handler(resp: Response) -> str: + if not resp.is_success: + raise FoxxReadmeGetError(resp, request) + return resp.raw_body.decode("utf-8") + + return await self._executor.execute(request, response_handler) + + async def swagger(self, mount: str) -> Result[Json]: + """Return the Swagger API description for the given service. + + Args: + mount (str): Service mount path. + + Returns: + dict: Swagger API description. + + Raises: + FoxxSwaggerGetError: If retrieval fails. + + References: + - `get-the-swagger-description `__ + """ # noqa: E501 + request = Request( + method=Method.GET, endpoint="/_api/foxx/swagger", params={"mount": mount} + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise FoxxSwaggerGetError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def download(self, mount: str) -> Result[bytes]: + """Downloads a zip bundle of the service directory. + + When development mode is enabled, this always creates a new bundle. + Otherwise, the bundle will represent the version of a service that is + installed on that ArangoDB instance. + + Args: + mount (str): Service mount path. + + Returns: + bytes: Service bundle zip in raw bytes form. + + Raises: + FoxxDownloadError: If download fails. 
+ + References: + - `download-a-service-bundle `__ + """ # noqa: E501 + request = Request( + method=Method.POST, endpoint="/_api/foxx/download", params={"mount": mount} + ) + + def response_handler(resp: Response) -> bytes: + if not resp.is_success: + raise FoxxDownloadError(resp, request) + return resp.raw_body + + return await self._executor.execute(request, response_handler) + + async def commit(self, replace: Optional[bool] = None) -> None: + """Commit local service state of the coordinator to the database. + + This can be used to resolve service conflicts between coordinators + that cannot be fixed automatically due to missing data. + + Args: + replace (bool | None): If set to `True`, any existing service files in the database + will be overwritten. + + Raises: + FoxxCommitError: If commit fails. + + References: + - `commit-the-local-service-state `__ + """ # noqa: E501 + params: Params = {} + if replace is not None: + params["replace"] = replace + + request = Request( + method=Method.POST, endpoint="/_api/foxx/commit", params=params + ) + + def response_handler(resp: Response) -> None: + if not resp.is_success: + raise FoxxCommitError(resp, request) + + await self._executor.execute(request, response_handler) diff --git a/arangoasync/request.py b/arangoasync/request.py index 6bd629d..9c43508 100644 --- a/arangoasync/request.py +++ b/arangoasync/request.py @@ -4,7 +4,7 @@ ] from enum import Enum, auto -from typing import Optional +from typing import Any, Optional from arangoasync.auth import Auth from arangoasync.typings import Params, RequestHeaders @@ -31,7 +31,7 @@ class Request: endpoint (str): API endpoint. headers (dict | None): Request headers. params (dict | None): URL parameters. - data (bytes | None): Request payload. + data (Any): Request payload. auth (Auth | None): Authentication. prefix_needed (bool): Whether the request needs a prefix (e.g., database name). @@ -40,7 +40,7 @@ class Request: endpoint (str): API endpoint. 
headers (dict | None): Request headers. params (dict | None): URL parameters. - data (bytes | None): Request payload. + data (Any): Request payload. auth (Auth | None): Authentication. prefix_needed (bool): Whether the request needs a prefix (e.g., database name). """ @@ -61,7 +61,7 @@ def __init__( endpoint: str, headers: Optional[RequestHeaders] = None, params: Optional[Params] = None, - data: Optional[bytes | str] = None, + data: Optional[Any] = None, auth: Optional[Auth] = None, prefix_needed: bool = True, ) -> None: @@ -69,7 +69,7 @@ def __init__( self.endpoint: str = endpoint self.headers: RequestHeaders = headers or dict() self.params: Params = params or dict() - self.data: Optional[bytes | str] = data + self.data: Optional[Any] = data self.auth: Optional[Auth] = auth self.prefix_needed = prefix_needed diff --git a/docs/document.rst b/docs/document.rst index 47619db..da6434b 100644 --- a/docs/document.rst +++ b/docs/document.rst @@ -177,7 +177,7 @@ To import this file into the "students" collection, we can use the `import_bulk` students = db.collection("students") # Read the JSONL file asynchronously. - async with aiofiles.open('students.jsonl', mode='r') as f: + async with aiofiles.open("students.jsonl", mode="r") as f: documents = await f.read() # Import documents in bulk. diff --git a/docs/foxx.rst b/docs/foxx.rst new file mode 100644 index 0000000..818c80e --- /dev/null +++ b/docs/foxx.rst @@ -0,0 +1,147 @@ +Foxx +---- + +**Foxx** is a microservice framework which lets you define custom HTTP endpoints +that extend ArangoDB's REST API. For more information, refer to `ArangoDB Manual`_. + +.. _ArangoDB Manual: https://docs.arangodb.com + +**Example:** + +.. code-block:: python + + from arangoasync import ArangoClient + from arangoasync.auth import Auth + + # Initialize the client for ArangoDB. + async with ArangoClient(hosts="http://localhost:8529") as client: + auth = Auth(username="root", password="passwd") + + # Connect to "test" database as root user. 
+ db = await client.db("test", auth=auth) + + # Get the Foxx API wrapper. + foxx = db.foxx + + # Define the test mount point. + service_mount = "/test_mount" + + # List services. + await foxx.services() + + # Create a service using a source file. + # In this case, the server must have access to the URL. + service = { + "source": "/tests/static/service.zip", + "configuration": {}, + "dependencies": {}, + } + await foxx.create_service( + mount=service_mount, + service=service, + development=True, + setup=True, + legacy=True + ) + + # Update (upgrade) a service. + await db.foxx.update_service( + mount=service_mount, + service=service, + teardown=True, + setup=True, + legacy=False + ) + + # Replace (overwrite) a service. + await db.foxx.replace_service( + mount=service_mount, + service=service, + teardown=True, + setup=True, + legacy=True, + force=False + ) + + # Get service details. + await foxx.service(service_mount) + + # Manage service configuration. + await foxx.config(service_mount) + await foxx.update_config(service_mount, options={}) + await foxx.replace_config(service_mount, options={}) + + # Manage service dependencies. + await foxx.dependencies(service_mount) + await foxx.update_dependencies(service_mount, options={}) + await foxx.replace_dependencies(service_mount, options={}) + + # Toggle development mode for a service. + await foxx.enable_development(service_mount) + await foxx.disable_development(service_mount) + + # Other miscellaneous functions. + await foxx.readme(service_mount) + await foxx.swagger(service_mount) + await foxx.download(service_mount) + await foxx.commit() + await foxx.scripts(service_mount) + await foxx.run_script(service_mount, "setup", {}) + await foxx.run_tests(service_mount, reporter="xunit", output_format="xml") + + # Delete a service. + await foxx.delete_service(service_mount) + +There are other ways to create, update, and replace services, such as +providing a file directly instead of a source URL. 
This is useful when you +want to deploy a service from a local file system without needing the +server to access the file directly. When using this method, you must provide +the appropriate content type in the headers, such as `application/zip` for ZIP files or +`multipart/form-data` for multipart uploads. The following example demonstrates how to do this: + +.. code-block:: python + + import aiofiles + import aiohttp + import json + from arangoasync import ArangoClient + from arangoasync.auth import Auth + + # Initialize the client for ArangoDB. + async with ArangoClient(hosts="http://localhost:8529") as client: + auth = Auth(username="root", password="passwd") + + # Connect to "test" database as root user. + db = await client.db("test", auth=auth) + + # Get the Foxx API wrapper. + foxx = db.foxx + + # Define the test mount points. + mount_point = "/test_mount" + + # Create the service using multipart/form-data. + service = aiohttp.FormData() + service.add_field( + "source", + open("./tests/static/service.zip", "rb"), + filename="service.zip", + content_type="application/zip", + ) + service.add_field("configuration", json.dumps({})) + service.add_field("dependencies", json.dumps({})) + service_info = await db.foxx.create_service( + mount=mount_point, service=service, headers={"content-type": "multipart/form-data"} + ) + + # Replace the service using raw data. + async with aiofiles.open("./tests/static/service.zip", mode="rb") as f: + service = await f.read() + service_info = await db.foxx.replace_service( + mount=mount_point, service=service, headers={"content-type": "application/zip"} + ) + + # Delete the service. + await db.foxx.delete_service(mount_point) + +See :class:`arangoasync.foxx.Foxx` for API specification. diff --git a/docs/index.rst b/docs/index.rst index 65eefd3..78afe62 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -43,6 +43,7 @@ Contents .. 
toctree:: :maxdepth: 1 + foxx transaction view analyzer diff --git a/pyproject.toml b/pyproject.toml index c5c890f..ef00aea 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -49,6 +49,7 @@ version = { attr = "arangoasync.version.__version__" } [project.optional-dependencies] dev = [ + "aiofiles>=24.1.0", "black>=24.2", "flake8>=7.0", "isort>=5.10", diff --git a/tests/helpers.py b/tests/helpers.py index dfaae4d..0e6e8a8 100644 --- a/tests/helpers.py +++ b/tests/helpers.py @@ -80,3 +80,12 @@ def generate_task_id(): str: Random task ID """ return f"test_task_id_{uuid4().hex}" + + +def generate_service_mount(): + """Generate and return a random service name. + + Returns: + str: Random service name. + """ + return f"/test_{uuid4().hex}" diff --git a/tests/static/service.zip b/tests/static/service.zip new file mode 100644 index 0000000000000000000000000000000000000000..00bf513ebf1066886e93020d36d46697ae55a134 GIT binary patch literal 2963 zcmZ`*c{r47A0FEnLyRR!SwkkrR+$9w;l`}#f4?>0a&F!F#vAP4|qkY!PtnG$eP z2fzZrNJ~u|lQn_BdH|RKxZ5~;$hdf4laYhfsa}QpjS-cMQ5N`OB$Kc~kS3C3*6AQyh5dm^L?K)=_stU@T|>CLc&k zIx9`0$KJb6Pk{tTIuPi{pNTx29dOth~CMvlR%Z!!zx$kU_?9TI%%%eDDte$SEm<|3sIuQv_K(*zz2-Wh6JKgOZw zcjQZDhCEwcS}x|yHjxAu-X5!ST1H(hQgxoU(DP08$1b=$R6VTQ7wO%CX`qcHpdI!f z?ReVb+`KIae{~eDudLrH$2PGcRhySwFmd}R_b)|75R;%ZRGgT~$;imJ`@R$%ue)?9 ztvlH7wkYk+=G&agY2ik^eA7_AtQbc(w``y8z6z_kggt#s)(J<^VsYzu&(pSD1>15U ze>~swpiDZVa#d3;Ig7JZ*f2Lr3>u{4@ zV#I=7Kv(^}=f4@^vqhPWI3W7)LKVH4@;!0OObmVn{*8a2sZLNn< zq#{^zdGs>8w>_G94oEWtIm+J))}@9YF>kOO7Q=H#!ukv&2-M2)ml!uaeE=c;5o0KI z(sLXE>zLd(`%Fp%>5qOL5pIkf8Kt);E_4UM!Ku9c6&U2E&Y4ofxnQaQDJ$;Nt$cX` z2hx6GYqhh6qd(fr$$Bhb3^Cy&n^KLq3L&=e>|GOs2I~Lz8^uozw=edf;B z!N{om=&Nn+w*?+aV_w%5D|(7AqhjmEs-LACh8Jc`GQ zq@xOO(MBt2e?BbE=yJgeHjIuLbZNHkPHaVJv}ic&2)F`#@C@E6kx)%F!S*ekTbXEc z?Yy95lc`?dUB4bm?m4}eeGK}xR2R{wied1VueJvZZ6V~-*xMWQ7iN(P4H-S{8f?o2 zu()RfT?Fh{Gg+tUIxFXUg_1dX9OH+a^bq!e; zv%gy_Bz>@{TGSzTQ}Fwzq6L*B@sMq-%n|c$(PL7NJkL zN$Z+~?&~E*ECKSrvv4&c!u$I-IZ!NHjWjqkX7vORzJxl)t9(N(@X0&wTB2!4!+a 
ztWRg0`N-~wO0V`GvmMUXzLlaDI;Az*8qf%w^@U3(%FApW%nU}JzTabUgVS@>lhZQ% zdIKT4k@itS&6*r5#f03M-V@+eM0`x_2MVQD^woO%psRRRhWxBAdC~3>+p7gB@)Dk* zuAOBi?z7{|?XcnVbC}bG6pso|4%;9%RRQyc?|Z1SFo++ib=LY_ZM`(QUA4vf2`(D< z#a$F*G6K!}L{YeiG~8v9-847P^@@v^rE>{2o#T3($j2;Qo~}#1LJT0vjFj-*uoWPZ zo9n*bKrJgYd(W7n7` z6F0zF}8?P|zLbdv<#Qq z=uNef(9(lM<(xBi3umzuCG!(uYiX96Hz=h1*Amw)lahyyNu*J1uWU61D9lxLYRJ-C zYos}y!a8!fhe|~UMC51sh4L=Ld78dO1*A11>=y6-ILjk0 zrYLuXootZO6VXg1c3t$NCO7QN)cc8E#*6#fmnfJrMOPoVUc%@3dvfP~*Q`)`QaIxI zWo}_(gyR!k-6flam>p&3#S+289XRH9i$nmG*8u=_3FyZ Date: Sat, 16 Aug 2025 19:05:42 +0800 Subject: [PATCH 10/14] Administration API (#66) * Getting started on administration API * Adding more Administration methods * Finishing up Administration API * Adding admin to to toctree --- arangoasync/database.py | 424 ++++++++++++++++++++++++++++++++++++++ arangoasync/exceptions.py | 68 +++++- docs/admin.rst | 47 +++++ docs/index.rst | 1 + tests/test_database.py | 95 ++++++++- 5 files changed, 632 insertions(+), 3 deletions(-) create mode 100644 docs/admin.rst diff --git a/arangoasync/database.py b/arangoasync/database.py index be057c4..449b789 100644 --- a/arangoasync/database.py +++ b/arangoasync/database.py @@ -6,6 +6,7 @@ ] +from datetime import datetime from typing import Any, List, Optional, Sequence, TypeVar, cast from warnings import warn @@ -26,10 +27,12 @@ CollectionDeleteError, CollectionKeyGeneratorsError, CollectionListError, + DatabaseCompactError, DatabaseCreateError, DatabaseDeleteError, DatabaseListError, DatabasePropertiesError, + DatabaseSupportInfoError, GraphCreateError, GraphDeleteError, GraphListError, @@ -39,8 +42,22 @@ PermissionListError, PermissionResetError, PermissionUpdateError, + ServerAvailableOptionsGetError, + ServerCheckAvailabilityError, + ServerCurrentOptionsGetError, + ServerEchoError, ServerEncryptionError, + ServerEngineError, + ServerExecuteError, + ServerLicenseGetError, + ServerLicenseSetError, + ServerModeError, + ServerModeSetError, + 
ServerReloadRoutingError, + ServerShutdownError, + ServerShutdownProgressError, ServerStatusError, + ServerTimeError, ServerTLSError, ServerTLSReloadError, ServerVersionError, @@ -2437,6 +2454,413 @@ def response_handler(resp: Response) -> bool: return await self._executor.execute(request, response_handler) + async def engine(self) -> Result[Json]: + """Returns the storage engine the server is configured to use. + + Returns: + dict: Database engine details. + + Raises: + ServerEngineError: If the operation fails. + + References: + - `get-the-storage-engine-type `__ + """ # noqa: E501 + request = Request(method=Method.GET, endpoint="/_api/engine") + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ServerEngineError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def time(self) -> Result[datetime]: + """Return server system time. + + Returns: + datetime.datetime: Server system time. + + Raises: + ServerTimeError: If the operation fails. + + References: + - `get-the-system-time `__ + """ # noqa: E501 + request = Request(method=Method.GET, endpoint="/_admin/time") + + def response_handler(resp: Response) -> datetime: + if not resp.is_success: + raise ServerTimeError(resp, request) + return datetime.fromtimestamp( + self.deserializer.loads(resp.raw_body)["time"] + ) + + return await self._executor.execute(request, response_handler) + + async def check_availability(self) -> Result[str]: + """Return ArangoDB server availability mode. + + Returns: + str: Server availability mode, either "readonly" or "default". + + Raises: + ServerCheckAvailabilityError: If the operation fails. 
+ + References: + - `check-server-availability `__ + """ # noqa: E501 + request = Request( + method=Method.GET, + endpoint="/_admin/server/availability", + ) + + def response_handler(resp: Response) -> str: + if not resp.is_success: + raise ServerCheckAvailabilityError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return str(result["mode"]) + + return await self._executor.execute(request, response_handler) + + async def support_info(self) -> Result[Json]: + """Retrieves deployment information for support purposes. + + Note: + As this API may reveal sensitive data about the deployment, it can only be accessed from inside the _system database. + + Returns: + dict: Deployment information + + Raises: + DatabaseSupportInfoError: If the operation fails. + + References: + - `get-information-about-the-deployment `__ + """ # noqa: E501 + request = Request(method=Method.GET, endpoint="/_admin/support-info") + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise DatabaseSupportInfoError(resp, request) + + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def options(self) -> Result[Json]: + """Return the currently-set server options. + + Returns: + dict: Server options. + + Raises: + ServerCurrentOptionsGetError: If the operation fails. + + References: + - `get-the-startup-option-configuration `__ + """ # noqa: E501 + request = Request(method=Method.GET, endpoint="/_admin/options") + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ServerCurrentOptionsGetError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def options_available(self) -> Result[Json]: + """Return a description of all available server options. + + Returns: + dict: Server options description. 
+ + Raises: + ServerAvailableOptionsGetError: If the operation fails. + + References: + - `get-the-available-startup-options `__ + """ # noqa: E501 + request = Request(method=Method.GET, endpoint="/_admin/options-description") + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ServerAvailableOptionsGetError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def mode(self) -> Result[str]: + """Return the server mode ("default" or "readonly"). + + Returns: + str: Server mode, either "default" or "readonly". + + Raises: + ServerModeError: If the operation fails. + + References: + - `return-whether-or-not-a-server-is-in-read-only-mode `__ + """ # noqa: E501 + request = Request(method=Method.GET, endpoint="/_admin/server/mode") + + def response_handler(resp: Response) -> str: + if not resp.is_success: + raise ServerModeError(resp, request) + return str(self.deserializer.loads(resp.raw_body)["mode"]) + + return await self._executor.execute(request, response_handler) + + async def set_mode(self, mode: str) -> Result[str]: + """Set the server mode to read-only or default. + + Args: + mode (str): Server mode. Possible values are "default" or "readonly". + + Returns: + str: New server mode. + + Raises: + ServerModeSetError: If the operation fails. 
+ + References: + - `set-the-server-mode-to-read-only-or-default `__ + """ # noqa: E501 + request = Request( + method=Method.PUT, + endpoint="/_admin/server/mode", + data=self.serializer.dumps({"mode": mode}), + ) + + def response_handler(resp: Response) -> str: + if not resp.is_success: + raise ServerModeSetError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return str(result["mode"]) + + return await self._executor.execute(request, response_handler) + + async def license(self) -> Result[Json]: + """View the license information and status of an Enterprise Edition instance. + + Returns: + dict: Server license information. + + Raises: + ServerLicenseGetError: If the operation fails. + + References: + - `get-information-about-the-current-license `__ + """ # noqa: E501 + request = Request(method=Method.GET, endpoint="/_admin/license") + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ServerLicenseGetError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def set_license(self, license: str, force: Optional[bool] = False) -> None: + """Set a new license for an Enterprise Edition instance. + + Args: + license (str) -> Base64-encoded license string, wrapped in double-quotes. + force (bool | None) -> Set to `True` to change the license even if it + expires sooner than the current one. + + Raises: + ServerLicenseSetError: If the operation fails. 
+ + References: + - `set-a-new-license `__ + """ # noqa: E501 + params: Params = {} + if force is not None: + params["force"] = force + + request = Request( + method=Method.PUT, + endpoint="/_admin/license", + params=params, + data=license, + ) + + def response_handler(resp: Response) -> None: + if not resp.is_success: + raise ServerLicenseSetError(resp, request) + + await self._executor.execute(request, response_handler) + + async def shutdown(self, soft: Optional[bool] = None) -> None: + """Initiate server shutdown sequence. + + Args: + soft (bool | None): If set to `True`, this initiates a soft shutdown. + + Raises: + ServerShutdownError: If the operation fails. + + References: + - `start-the-shutdown-sequence `__ + """ # noqa: E501 + params: Params = {} + if soft is not None: + params["soft"] = soft + + request = Request( + method=Method.DELETE, + endpoint="/_admin/shutdown", + params=params, + ) + + def response_handler(resp: Response) -> None: + if not resp.is_success: + raise ServerShutdownError(resp, request) + + await self._executor.execute(request, response_handler) + + async def shutdown_progress(self) -> Result[Json]: + """Query the soft shutdown progress. + + Returns: + dict: Information about the shutdown progress. + + Raises: + ServerShutdownProgressError: If the operation fails. + + References: + - `query-the-soft-shutdown-progress `__ + """ # noqa: E501 + request = Request(method=Method.GET, endpoint="/_admin/shutdown") + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ServerShutdownProgressError(resp, request) + + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def compact( + self, + change_level: Optional[bool] = None, + compact_bottom_most_level: Optional[bool] = None, + ) -> None: + """Compact all databases. This method requires superuser access. 
+ + Note: + This command can cause a full rewrite of all data in all databases, + which may take very long for large databases. + + Args: + change_level (bool | None): Whether or not compacted data should be + moved to the minimum possible level. Default value is `False`. + compact_bottom_most_level (bool | None): Whether or not to compact the bottom-most level of data. + Default value is `False`. + + Returns: + dict: Information about the compaction process. + + Raises: + DatabaseCompactError: If the operation fails. + + References: + - `compact-all-databases `__ + """ # noqa: E501 + data = {} + if change_level is not None: + data["changeLevel"] = change_level + if compact_bottom_most_level is not None: + data["compactBottomMostLevel"] = compact_bottom_most_level + + request = Request( + method=Method.PUT, + endpoint="/_admin/compact", + data=self.serializer.dumps(data), + ) + + def response_handler(resp: Response) -> None: + if not resp.is_success: + raise DatabaseCompactError(resp, request) + + await self._executor.execute(request, response_handler) + + async def reload_routing(self) -> None: + """Reload the routing information. + + Raises: + ServerReloadRoutingError: If the operation fails. + + References: + - `reload-the-routing-table `__ + """ # noqa: E501 + request = Request(method=Method.POST, endpoint="/_admin/routing/reload") + + def response_handler(resp: Response) -> None: + if not resp.is_success: + raise ServerReloadRoutingError(resp, request) + + await self._executor.execute(request, response_handler) + + async def echo(self, body: Optional[Json] = None) -> Result[Json]: + """Return an object with the servers request information. + + Args: + body (dict | None): Optional body of the request. + + Returns: + dict: Details of the request. + + Raises: + ServerEchoError: If the operation fails. 
+ + References: + - `echo-a-request `__ + """ # noqa: E501 + data = body if body is not None else {} + request = Request(method=Method.POST, endpoint="/_admin/echo", data=data) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ServerEchoError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def execute(self, command: str) -> Result[Any]: + """Execute raw Javascript command on the server. + + Args: + command (str): Javascript command to execute. + + Returns: + Return value of **command**, if any. + + Raises: + ServerExecuteError: If the execution fails. + + References: + - `execute-a-script `__ + """ # noqa: E501 + request = Request( + method=Method.POST, endpoint="/_admin/execute", data=command.encode("utf-8") + ) + + def response_handler(resp: Response) -> Any: + if not resp.is_success: + raise ServerExecuteError(resp, request) + return self.deserializer.loads(resp.raw_body) + + return await self._executor.execute(request, response_handler) + class StandardDatabase(Database): """Standard database API wrapper. 
diff --git a/arangoasync/exceptions.py b/arangoasync/exceptions.py index 99340dd..96a432a 100644 --- a/arangoasync/exceptions.py +++ b/arangoasync/exceptions.py @@ -319,6 +319,10 @@ class CursorStateError(ArangoClientError): """The cursor object was in a bad state.""" +class DatabaseCompactError(ArangoServerError): + """Failed to compact databases.""" + + class DatabaseCreateError(ArangoServerError): """Failed to create database.""" @@ -335,6 +339,10 @@ class DatabasePropertiesError(ArangoServerError): """Failed to retrieve database properties.""" +class DatabaseSupportInfoError(ArangoServerError): + """Failed to retrieve support info for deployment.""" + + class DeserializationError(ArangoClientError): """Failed to deserialize the server response.""" @@ -547,14 +555,66 @@ class SerializationError(ArangoClientError): """Failed to serialize the request.""" -class ServerEncryptionError(ArangoServerError): - """Failed to reload user-defined encryption keys.""" +class ServerAvailableOptionsGetError(ArangoServerError): + """Failed to retrieve available server options.""" + + +class ServerCheckAvailabilityError(ArangoServerError): + """Failed to retrieve server availability mode.""" class ServerConnectionError(ArangoServerError): """Failed to connect to ArangoDB server.""" +class ServerCurrentOptionsGetError(ArangoServerError): + """Failed to retrieve currently-set server options.""" + + +class ServerEchoError(ArangoServerError): + """Failed to retrieve details on last request.""" + + +class ServerEncryptionError(ArangoServerError): + """Failed to reload user-defined encryption keys.""" + + +class ServerEngineError(ArangoServerError): + """Failed to retrieve database engine.""" + + +class ServerExecuteError(ArangoServerError): + """Failed to execute raw JavaScript command.""" + + +class ServerModeError(ArangoServerError): + """Failed to retrieve server mode.""" + + +class ServerModeSetError(ArangoServerError): + """Failed to set server mode.""" + + +class 
ServerLicenseGetError(ArangoServerError): + """Failed to retrieve server license.""" + + +class ServerLicenseSetError(ArangoServerError): + """Failed to set server license.""" + + +class ServerReloadRoutingError(ArangoServerError): + """Failed to reload routing details.""" + + +class ServerShutdownError(ArangoServerError): + """Failed to initiate shutdown sequence.""" + + +class ServerShutdownProgressError(ArangoServerError): + """Failed to retrieve soft shutdown progress.""" + + class ServerStatusError(ArangoServerError): """Failed to retrieve server status.""" @@ -567,6 +627,10 @@ class ServerTLSReloadError(ArangoServerError): """Failed to reload TLS.""" +class ServerTimeError(ArangoServerError): + """Failed to retrieve server system time.""" + + class ServerVersionError(ArangoServerError): """Failed to retrieve server version.""" diff --git a/docs/admin.rst b/docs/admin.rst new file mode 100644 index 0000000..6a494d1 --- /dev/null +++ b/docs/admin.rst @@ -0,0 +1,47 @@ +Server Administration +--------------------- + +ArangoDB provides operations for server administration and monitoring. +Most of these operations can only be performed by admin users via the +``_system`` database. + +**Example:** + +.. code-block:: python + + from arangoasync import ArangoClient + from arangoasync.auth import Auth + + # Initialize the client for ArangoDB. + async with ArangoClient(hosts="http://localhost:8529") as client: + auth = Auth(username="root", password="passwd") + + # Connect to "_system" database as root user. + sys_db = await client.db("_system", auth=auth) + + # Retrieve the database engine. + await sys_db.engine() + + # Retrieve the server time.. 
+ time = await sys_db.time() + + # Check server availability + availability = sys_db.check_availability() + + # Support info + info = sys_db.support_info() + + # Get the startup option configuration + options = await sys_db.options() + + # Get the available startup options + options = await sys_db.options_available() + + # Return whether or not a server is in read-only mode + mode = await sys_db.mode() + + # Get license information + license = await sys_db.license() + + # Execute Javascript on the server + result = await sys_db.execute("return 1") diff --git a/docs/index.rst b/docs/index.rst index 78afe62..0fab3ac 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -61,6 +61,7 @@ Contents .. toctree:: :maxdepth: 1 + admin user **Miscellaneous** diff --git a/tests/test_database.py b/tests/test_database.py index 7058ac1..5daa837 100644 --- a/tests/test_database.py +++ b/tests/test_database.py @@ -1,21 +1,39 @@ import asyncio +import datetime import pytest from packaging import version +from arangoasync.client import ArangoClient from arangoasync.collection import StandardCollection from arangoasync.exceptions import ( CollectionCreateError, CollectionDeleteError, CollectionKeyGeneratorsError, CollectionListError, + DatabaseCompactError, DatabaseCreateError, DatabaseDeleteError, DatabaseListError, DatabasePropertiesError, + DatabaseSupportInfoError, JWTSecretListError, JWTSecretReloadError, + ServerAvailableOptionsGetError, + ServerCheckAvailabilityError, + ServerCurrentOptionsGetError, + ServerEchoError, + ServerEngineError, + ServerExecuteError, + ServerLicenseGetError, + ServerLicenseSetError, + ServerModeError, + ServerModeSetError, + ServerReloadRoutingError, + ServerShutdownError, + ServerShutdownProgressError, ServerStatusError, + ServerTimeError, ServerVersionError, ) from arangoasync.typings import CollectionType, KeyOptions, UserInfo @@ -23,7 +41,9 @@ @pytest.mark.asyncio -async def test_database_misc_methods(sys_db, db, bad_db, cluster, db_version): +async 
def test_database_misc_methods( + sys_db, db, bad_db, cluster, db_version, url, sys_db_name, token +): # Status status = await sys_db.status() assert status["server"] == "arango" @@ -64,6 +84,79 @@ async def test_database_misc_methods(sys_db, db, bad_db, cluster, db_version): with pytest.raises(CollectionKeyGeneratorsError): await bad_db.key_generators() + # Administration + with pytest.raises(ServerEngineError): + await bad_db.engine() + result = await db.engine() + assert isinstance(result, dict) + + with pytest.raises(ServerTimeError): + await bad_db.time() + time = await db.time() + assert isinstance(time, datetime.datetime) + + with pytest.raises(ServerCheckAvailabilityError): + await bad_db.check_availability() + assert isinstance(await db.check_availability(), str) + + with pytest.raises(DatabaseSupportInfoError): + await bad_db.support_info() + info = await sys_db.support_info() + assert isinstance(info, dict) + + if db_version >= version.parse("3.12.0"): + with pytest.raises(ServerCurrentOptionsGetError): + await bad_db.options() + options = await sys_db.options() + assert isinstance(options, dict) + with pytest.raises(ServerAvailableOptionsGetError): + await bad_db.options_available() + options_available = await sys_db.options_available() + assert isinstance(options_available, dict) + + with pytest.raises(ServerModeError): + await bad_db.mode() + mode = await sys_db.mode() + assert isinstance(mode, str) + with pytest.raises(ServerModeSetError): + await bad_db.set_mode("foo") + mode = await sys_db.set_mode("default") + assert isinstance(mode, str) + + with pytest.raises(ServerLicenseGetError): + await bad_db.license() + license = await sys_db.license() + assert isinstance(license, dict) + with pytest.raises(ServerLicenseSetError): + await sys_db.set_license('"abc"') + + with pytest.raises(ServerShutdownError): + await bad_db.shutdown() + with pytest.raises(ServerShutdownProgressError): + await bad_db.shutdown_progress() + + with 
pytest.raises(ServerReloadRoutingError): + await bad_db.reload_routing() + await sys_db.reload_routing() + + with pytest.raises(ServerEchoError): + await bad_db.echo() + result = await sys_db.echo() + assert isinstance(result, dict) + + with pytest.raises(ServerExecuteError): + await bad_db.execute("return 1") + result = await sys_db.execute("return 1") + assert result == 1 + + with pytest.raises(DatabaseCompactError): + await bad_db.compact() + async with ArangoClient(hosts=url) as client: + db = await client.db( + sys_db_name, auth_method="superuser", token=token, verify=True + ) + await db.compact() + @pytest.mark.asyncio async def test_create_drop_database( From 1dd20747988976588d0a3b16d5d5d9d21fdbed70 Mon Sep 17 00:00:00 2001 From: Alex Petenchea Date: Sat, 16 Aug 2025 19:49:13 +0800 Subject: [PATCH 11/14] Adding custom requests (#67) --- arangoasync/database.py | 15 +++++++++++++++ docs/database.rst | 7 +++++++ tests/test_database.py | 9 +++++++++ 3 files changed, 31 insertions(+) diff --git a/arangoasync/database.py b/arangoasync/database.py index 449b789..813a1ab 100644 --- a/arangoasync/database.py +++ b/arangoasync/database.py @@ -2861,6 +2861,21 @@ def response_handler(resp: Response) -> Any: return await self._executor.execute(request, response_handler) + async def request(self, request: Request) -> Result[Response]: + """Execute a custom request. + + Args: + request (Request): Request object to be executed. + + Returns: + Response: Response object containing the result of the request. + """ + + def response_handler(resp: Response) -> Response: + return resp + + return await self._executor.execute(request, response_handler) + class StandardDatabase(Database): """Standard database API wrapper. diff --git a/docs/database.rst b/docs/database.rst index 851cc9d..f4dc759 100644 --- a/docs/database.rst +++ b/docs/database.rst @@ -14,6 +14,7 @@ information. 
from arangoasync import ArangoClient from arangoasync.auth import Auth + from arangoasync.request import Method, Request # Initialize the client for ArangoDB. async with ArangoClient(hosts="http://localhost:8529") as client: @@ -60,4 +61,10 @@ information. # Delete the database. Note that the new users will remain. await sys_db.delete_database("test") + # Example of a custom request + request = Request( + method=Method.POST, endpoint="/_admin/execute", data="return 1".encode("utf-8") + ) + response = await sys_db.request(request) + See :class:`arangoasync.client.ArangoClient` and :class:`arangoasync.database.StandardDatabase` for API specification. diff --git a/tests/test_database.py b/tests/test_database.py index 5daa837..c9a260b 100644 --- a/tests/test_database.py +++ b/tests/test_database.py @@ -1,5 +1,6 @@ import asyncio import datetime +import json import pytest from packaging import version @@ -36,6 +37,7 @@ ServerTimeError, ServerVersionError, ) +from arangoasync.request import Method, Request from arangoasync.typings import CollectionType, KeyOptions, UserInfo from tests.helpers import generate_col_name, generate_db_name, generate_username @@ -157,6 +159,13 @@ async def test_database_misc_methods( ) await db.compact() + # Custom Request + request = Request( + method=Method.POST, endpoint="/_admin/execute", data="return 1".encode("utf-8") + ) + response = await sys_db.request(request) + assert json.loads(response.raw_body) == 1 + @pytest.mark.asyncio async def test_create_drop_database( From f1de45bf445f500848d3b0e413355a026b4d7be6 Mon Sep 17 00:00:00 2001 From: Alex Petenchea Date: Sat, 16 Aug 2025 21:20:16 +0800 Subject: [PATCH 12/14] Monitoring API (#68) * Adding monitoring API * API calls only in 3.12 * API calls only in enterprise --- arangoasync/database.py | 343 +++++++++++++++++++++++++++++++++++++- arangoasync/exceptions.py | 32 ++++ docs/admin.rst | 3 + tests/test_database.py | 54 +++++- 4 files changed, 430 insertions(+), 2 deletions(-) diff --git 
a/arangoasync/database.py b/arangoasync/database.py index 813a1ab..2997bab 100644 --- a/arangoasync/database.py +++ b/arangoasync/database.py @@ -7,7 +7,7 @@ from datetime import datetime -from typing import Any, List, Optional, Sequence, TypeVar, cast +from typing import Any, Dict, List, Optional, Sequence, TypeVar, cast from warnings import warn from arangoasync.aql import AQL @@ -42,6 +42,7 @@ PermissionListError, PermissionResetError, PermissionUpdateError, + ServerApiCallsError, ServerAvailableOptionsGetError, ServerCheckAvailabilityError, ServerCurrentOptionsGetError, @@ -51,8 +52,15 @@ ServerExecuteError, ServerLicenseGetError, ServerLicenseSetError, + ServerLogLevelError, + ServerLogLevelResetError, + ServerLogLevelSetError, + ServerLogSettingError, + ServerLogSettingSetError, + ServerMetricsError, ServerModeError, ServerModeSetError, + ServerReadLogError, ServerReloadRoutingError, ServerShutdownError, ServerShutdownProgressError, @@ -2876,6 +2884,339 @@ def response_handler(resp: Response) -> Response: return await self._executor.execute(request, response_handler) + async def metrics(self, server_id: Optional[str] = None) -> Result[str]: + """Return server metrics in Prometheus format. + + Args: + server_id (str | None): Returns metrics of the specified server. + If no serverId is given, the asked server will reply. + + Returns: + str: Server metrics in Prometheus format. + + Raises: + ServerMetricsError: If the operation fails. 
+ + References: + - `metrics-api-v2 `__ + """ # noqa: E501 + params: Params = {} + if server_id is not None: + params["serverId"] = server_id + + request = Request( + method=Method.GET, + endpoint="/_admin/metrics/v2", + params=params, + ) + + def response_handler(resp: Response) -> str: + if not resp.is_success: + raise ServerMetricsError(resp, request) + return resp.raw_body.decode("utf-8") + + return await self._executor.execute(request, response_handler) + + async def read_log_entries( + self, + upto: Optional[int | str] = None, + level: Optional[str] = None, + start: Optional[int] = None, + size: Optional[int] = None, + offset: Optional[int] = None, + search: Optional[str] = None, + sort: Optional[str] = None, + server_id: Optional[str] = None, + ) -> Result[Json]: + """Read the global log from server. + + Args: + upto (int | str | None): Return the log entries up to the given level + (mutually exclusive with parameter **level**). Allowed values are + "fatal", "error", "warning", "info" (default), "debug" and "trace". + level (int | str | None): Return the log entries of only the given level + (mutually exclusive with **upto**). + start (int | None): Return the log entries whose ID is greater or equal to + the given value. + size (int | None): Restrict the size of the result to the given value. + This can be used for pagination. + offset (int | None): Number of entries to skip (e.g. for pagination). + search (str | None): Return only the log entries containing the given text. + sort (str | None): Sort the log entries according to the given fashion, + which can be "sort" or "desc". + server_id (str | None): Returns all log entries of the specified server. + If no serverId is given, the asked server will reply. + + Returns: + dict: Server log entries. + + Raises: + ServerReadLogError: If the operation fails. 
+ + References: + - `get-the-global-server-logs `__ + """ # noqa: E501 + params: Params = {} + if upto is not None: + params["upto"] = upto + if level is not None: + params["level"] = level + if start is not None: + params["start"] = start + if size is not None: + params["size"] = size + if offset is not None: + params["offset"] = offset + if search is not None: + params["search"] = search + if sort is not None: + params["sort"] = sort + if server_id is not None: + params["serverId"] = server_id + + request = Request( + method=Method.GET, + endpoint="/_admin/log/entries", + params=params, + prefix_needed=False, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ServerReadLogError(resp, request) + + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def log_levels( + self, server_id: Optional[str] = None, with_appenders: Optional[bool] = None + ) -> Result[Json]: + """Return current logging levels. + + Args: + server_id (str | None): Forward the request to the specified server. + with_appenders (bool | None): Include appenders in the response. + + Returns: + dict: Current logging levels. + + Raises: + ServerLogLevelError: If the operation fails. 
+ + References: + - `get-the-server-log-levels `__ + """ # noqa: E501 + params: Params = {} + if server_id is not None: + params["serverId"] = server_id + if with_appenders is not None: + params["withAppenders"] = with_appenders + + request = Request( + method=Method.GET, + endpoint="/_admin/log/level", + params=params, + prefix_needed=False, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ServerLogLevelError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def set_log_levels( + self, + server_id: Optional[str] = None, + with_appenders: Optional[bool] = None, + **kwargs: Dict[str, Any], + ) -> Result[Json]: + """Set the logging levels. + + This method takes arbitrary keyword arguments where the keys are the + logger names and the values are the logging levels. For example: + + .. code-block:: python + + db.set_log_levels( + agency='DEBUG', + collector='INFO', + threads='WARNING' + ) + + Keys that are not valid logger names are ignored. + + Args: + server_id (str | None) -> Forward the request to a specific server. + with_appenders (bool | None): Include appenders in the response. + kwargs (dict): Logging levels to be set. + + Returns: + dict: New logging levels. + + Raises: + ServerLogLevelSetError: If the operation fails. 
+ + References: + - `set-the-structured-log-settings `__ + """ # noqa: E501 + params: Params = {} + if server_id is not None: + params["serverId"] = server_id + if with_appenders is not None: + params["withAppenders"] = with_appenders + + request = Request( + method=Method.PUT, + endpoint="/_admin/log/level", + params=params, + data=self.serializer.dumps(kwargs), + prefix_needed=False, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ServerLogLevelSetError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def reset_log_levels(self, server_id: Optional[str] = None) -> Result[Json]: + """Reset the logging levels. + + Revert the server’s log level settings to the values they had at startup, + as determined by the startup options specified on the command-line, + a configuration file, and the factory defaults. + + Args: + server_id: Forward the request to a specific server. + + Returns: + dict: New logging levels. + + Raises: + ServerLogLevelResetError: If the operation fails. + + References: + - `reset-the-server-log-levels `__ + """ # noqa: E501 + params: Params = {} + if server_id is not None: + params["serverId"] = server_id + + request = Request( + method=Method.DELETE, + endpoint="/_admin/log/level", + params=params, + prefix_needed=False, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ServerLogLevelResetError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def log_settings(self) -> Result[Json]: + """Get the structured log settings. + + Returns: + dict: Current structured log settings. + + Raises: + ServerLogSettingError: If the operation fails. 
+ + References: + - `get-the-structured-log-settings `__ + """ # noqa: E501 + request = Request( + method=Method.GET, + endpoint="/_admin/log/structured", + prefix_needed=False, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ServerLogSettingError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def set_log_settings(self, **kwargs: Dict[str, Any]) -> Result[Json]: + """Set the structured log settings. + + This method takes arbitrary keyword arguments where the keys are the + structured log parameters and the values are true or false, for either + enabling or disabling the parameters. + + .. code-block:: python + + db.set_log_settings( + database=True, + url=True, + username=False, + ) + + Args: + kwargs (dict): Structured log parameters to be set. + + Returns: + dict: New structured log settings. + + Raises: + ServerLogSettingSetError: If the operation fails. + + References: + - `set-the-structured-log-settings `__ + """ # noqa: E501 + request = Request( + method=Method.PUT, + endpoint="/_admin/log/structured", + data=self.serializer.dumps(kwargs), + prefix_needed=False, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ServerLogSettingSetError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def api_calls(self) -> Result[Json]: + """Get a list of the most recent requests with a timestamp and the endpoint. + + Returns: + dict: API calls made to the server. + + Raises: + ServerApiCallsError: If the operation fails. 
+ + References: + - `get-recent-api-calls `__ + """ # noqa: E501 + request = Request( + method=Method.GET, + endpoint="/_admin/server/api-calls", + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ServerApiCallsError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body)["result"] + return result + + return await self._executor.execute(request, response_handler) + class StandardDatabase(Database): """Standard database API wrapper. diff --git a/arangoasync/exceptions.py b/arangoasync/exceptions.py index 96a432a..ebe028e 100644 --- a/arangoasync/exceptions.py +++ b/arangoasync/exceptions.py @@ -555,6 +555,10 @@ class SerializationError(ArangoClientError): """Failed to serialize the request.""" +class ServerApiCallsError(ArangoServerError): + """Failed to retrieve the list of recent API calls.""" + + class ServerAvailableOptionsGetError(ArangoServerError): """Failed to retrieve available server options.""" @@ -587,6 +591,10 @@ class ServerExecuteError(ArangoServerError): """Failed to execute raw JavaScript command.""" +class ServerMetricsError(ArangoServerError): + """Failed to retrieve server metrics.""" + + class ServerModeError(ArangoServerError): """Failed to retrieve server mode.""" @@ -603,6 +611,30 @@ class ServerLicenseSetError(ArangoServerError): """Failed to set server license.""" +class ServerLogLevelError(ArangoServerError): + """Failed to retrieve server log levels.""" + + +class ServerLogLevelResetError(ArangoServerError): + """Failed to reset server log levels.""" + + +class ServerLogLevelSetError(ArangoServerError): + """Failed to set server log levels.""" + + +class ServerLogSettingError(ArangoServerError): + """Failed to retrieve server log settings.""" + + +class ServerLogSettingSetError(ArangoServerError): + """Failed to set server log settings.""" + + +class ServerReadLogError(ArangoServerError): + """Failed to retrieve global log.""" + + class ServerReloadRoutingError(ArangoServerError): 
"""Failed to reload routing details.""" diff --git a/docs/admin.rst b/docs/admin.rst index 6a494d1..6120567 100644 --- a/docs/admin.rst +++ b/docs/admin.rst @@ -45,3 +45,6 @@ Most of these operations can only be performed by admin users via the # Execute Javascript on the server result = await sys_db.execute("return 1") + + # Get metrics in Prometheus format + metrics = await db.metrics() diff --git a/tests/test_database.py b/tests/test_database.py index c9a260b..425007b 100644 --- a/tests/test_database.py +++ b/tests/test_database.py @@ -20,6 +20,7 @@ DatabaseSupportInfoError, JWTSecretListError, JWTSecretReloadError, + ServerApiCallsError, ServerAvailableOptionsGetError, ServerCheckAvailabilityError, ServerCurrentOptionsGetError, @@ -28,8 +29,15 @@ ServerExecuteError, ServerLicenseGetError, ServerLicenseSetError, + ServerLogLevelError, + ServerLogLevelResetError, + ServerLogLevelSetError, + ServerLogSettingError, + ServerLogSettingSetError, + ServerMetricsError, ServerModeError, ServerModeSetError, + ServerReadLogError, ServerReloadRoutingError, ServerShutdownError, ServerShutdownProgressError, @@ -44,7 +52,7 @@ @pytest.mark.asyncio async def test_database_misc_methods( - sys_db, db, bad_db, cluster, db_version, url, sys_db_name, token + sys_db, db, bad_db, cluster, db_version, url, sys_db_name, token, enterprise ): # Status status = await sys_db.status() @@ -166,6 +174,50 @@ async def test_database_misc_methods( response = await sys_db.request(request) assert json.loads(response.raw_body) == 1 + if enterprise and db_version >= version.parse("3.12.0"): + # API calls + with pytest.raises(ServerApiCallsError): + await bad_db.api_calls() + result = await sys_db.api_calls() + assert isinstance(result, dict) + + +@pytest.mark.asyncio +async def test_metrics(db, bad_db): + with pytest.raises(ServerMetricsError): + await bad_db.metrics() + metrics = await db.metrics() + assert isinstance(metrics, str) + + +@pytest.mark.asyncio +async def test_logs(sys_db, bad_db): + 
with pytest.raises(ServerReadLogError): + await bad_db.read_log_entries() + result = await sys_db.read_log_entries() + assert isinstance(result, dict) + with pytest.raises(ServerLogLevelError): + await bad_db.log_levels() + result = await sys_db.log_levels() + assert isinstance(result, dict) + with pytest.raises(ServerLogLevelSetError): + await bad_db.set_log_levels() + new_levels = {"agency": "DEBUG", "engines": "INFO", "threads": "WARNING"} + result = await sys_db.set_log_levels(**new_levels) + assert isinstance(result, dict) + with pytest.raises(ServerLogLevelResetError): + await bad_db.reset_log_levels() + result = await sys_db.reset_log_levels() + assert isinstance(result, dict) + with pytest.raises(ServerLogSettingError): + await bad_db.log_settings() + result = await sys_db.log_settings() + assert isinstance(result, dict) + with pytest.raises(ServerLogSettingSetError): + await bad_db.set_log_settings() + result = await sys_db.set_log_settings() + assert isinstance(result, dict) + @pytest.mark.asyncio async def test_create_drop_database( From 4bc2ca70eabc8a75c55887d4623acbc66b6e0fa8 Mon Sep 17 00:00:00 2001 From: Alex Petenchea Date: Sun, 17 Aug 2025 12:48:53 +0800 Subject: [PATCH 13/14] Replication APIjk (#69) * Adding replication API * Test fixes --- arangoasync/database.py | 10 ++ arangoasync/exceptions.py | 28 ++++ arangoasync/replication.py | 270 +++++++++++++++++++++++++++++++++++++ docs/migration.rst | 9 +- tests/test_database.py | 37 +++++ 5 files changed, 353 insertions(+), 1 deletion(-) create mode 100644 arangoasync/replication.py diff --git a/arangoasync/database.py b/arangoasync/database.py index 2997bab..a28fa43 100644 --- a/arangoasync/database.py +++ b/arangoasync/database.py @@ -101,6 +101,7 @@ ) from arangoasync.foxx import Foxx from arangoasync.graph import Graph +from arangoasync.replication import Replication from arangoasync.request import Method, Request from arangoasync.response import Response from arangoasync.result import Result @@ 
-234,6 +235,15 @@ def foxx(self) -> Foxx: """ return Foxx(self._executor) + @property + def replication(self) -> Replication: + """Return Replication API wrapper. + + Returns: + Replication API wrapper. + """ + return Replication(self._executor) + async def properties(self) -> Result[DatabaseProperties]: """Return database properties. diff --git a/arangoasync/exceptions.py b/arangoasync/exceptions.py index ebe028e..5a904ee 100644 --- a/arangoasync/exceptions.py +++ b/arangoasync/exceptions.py @@ -551,6 +551,34 @@ class PermissionUpdateError(ArangoServerError): """Failed to update user permission.""" +class ReplicationApplierConfigError(ArangoServerError): + """Failed to retrieve replication applier configuration.""" + + +class ReplicationApplierStateError(ArangoServerError): + """Failed to retrieve replication applier state.""" + + +class ReplicationClusterInventoryError(ArangoServerError): + """Failed to retrieve overview of collection and indexes in a cluster.""" + + +class ReplicationDumpError(ArangoServerError): + """Failed to retrieve collection content.""" + + +class ReplicationInventoryError(ArangoServerError): + """Failed to retrieve inventory of collection and indexes.""" + + +class ReplicationLoggerStateError(ArangoServerError): + """Failed to retrieve logger state.""" + + +class ReplicationServerIDError(ArangoServerError): + """Failed to retrieve server ID.""" + + class SerializationError(ArangoClientError): """Failed to serialize the request.""" diff --git a/arangoasync/replication.py b/arangoasync/replication.py new file mode 100644 index 0000000..9d96709 --- /dev/null +++ b/arangoasync/replication.py @@ -0,0 +1,270 @@ +__all__ = ["Replication"] + + +from typing import Optional + +from arangoasync.exceptions import ( + ReplicationApplierConfigError, + ReplicationApplierStateError, + ReplicationClusterInventoryError, + ReplicationDumpError, + ReplicationInventoryError, + ReplicationLoggerStateError, + ReplicationServerIDError, +) +from 
arangoasync.executor import ApiExecutor +from arangoasync.request import Method, Request +from arangoasync.response import Response +from arangoasync.result import Result +from arangoasync.serialization import Deserializer, Serializer +from arangoasync.typings import Json, Jsons, Params + + +class Replication: + """Replication API wrapper.""" + + def __init__(self, executor: ApiExecutor) -> None: + self._executor = executor + + @property + def serializer(self) -> Serializer[Json]: + """Return the serializer.""" + return self._executor.serializer + + @property + def deserializer(self) -> Deserializer[Json, Jsons]: + """Return the deserializer.""" + return self._executor.deserializer + + async def inventory( + self, + batch_id: str, + include_system: Optional[bool] = None, + all_databases: Optional[bool] = None, + collection: Optional[bool] = None, + db_server: Optional[str] = None, + ) -> Result[Json]: + """ + Return an overview of collections and indexes. + + Args: + batch_id (str): Batch ID. + include_system (bool | None): Include system collections. + all_databases (bool | None): Include all databases (only on "_system"). + collection (bool | None): If this parameter is set, the + response will be restricted to a single collection (the one specified), + and no views will be returned. + db_server (str | None): On a Coordinator, this request must have a + DBserver query parameter + + Returns: + dict: Overview of collections and indexes. + + Raises: + ReplicationInventoryError: If retrieval fails. 
+ + References: + - `get-a-replication-inventory `__ + """ # noqa: E501 + params: Params = dict() + params["batchId"] = batch_id + if include_system is not None: + params["includeSystem"] = include_system + if all_databases is not None: + params["global"] = all_databases + if collection is not None: + params["collection"] = collection + if db_server is not None: + params["DBServer"] = db_server + + request = Request( + method=Method.GET, + endpoint="/_api/replication/inventory", + params=params, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ReplicationInventoryError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def dump( + self, + collection: str, + batch_id: Optional[str] = None, + chunk_size: Optional[int] = None, + ) -> Result[bytes]: + """Return the events data of one collection. + + Args: + collection (str): ID of the collection to dump. + batch_id (str | None): Batch ID. + chunk_size (int | None): Size of the result in bytes. This value is honored + approximately only. + + Returns: + bytes: Collection events data. + + Raises: + ReplicationDumpError: If retrieval fails. + + References: + - `get-a-replication-dump `__ + """ # noqa: E501 + params: Params = dict() + params["collection"] = collection + if batch_id is not None: + params["batchId"] = batch_id + if chunk_size is not None: + params["chunkSize"] = chunk_size + + request = Request( + method=Method.GET, + endpoint="/_api/replication/dump", + params=params, + ) + + def response_handler(resp: Response) -> bytes: + if not resp.is_success: + raise ReplicationDumpError(resp, request) + return resp.raw_body + + return await self._executor.execute(request, response_handler) + + async def cluster_inventory( + self, include_system: Optional[bool] = None + ) -> Result[Json]: + """Return an overview of collections and indexes in a cluster. 
+ + Args: + include_system (bool | None): Include system collections. + + Returns: + dict: Overview of collections and indexes in the cluster. + + Raises: + ReplicationClusterInventoryError: If retrieval fails. + + References: + - `get-the-cluster-collections-and-indexes `__ + """ # noqa: E501 + params: Params = {} + if include_system is not None: + params["includeSystem"] = include_system + + request = Request( + method=Method.GET, + endpoint="/_api/replication/clusterInventory", + params=params, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ReplicationClusterInventoryError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def logger_state(self) -> Result[Json]: + """Return the state of the replication logger. + + Returns: + dict: Logger state. + + Raises: + ReplicationLoggerStateError: If retrieval fails. + + References: + - `get-the-replication-logger-state `__ + """ # noqa: E501 + request = Request( + method=Method.GET, + endpoint="/_api/replication/logger-state", + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ReplicationLoggerStateError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def applier_config(self) -> Result[Json]: + """Return the configuration of the replication applier. + + Returns: + dict: Configuration of the replication applier. + + Raises: + ReplicationApplierConfigError: If retrieval fails. 
+ + References: + - `get-the-replication-applier-configuration `__ + """ # noqa: E501 + request = Request( + method=Method.GET, + endpoint="/_api/replication/applier-config", + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ReplicationApplierConfigError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def applier_state(self) -> Result[Json]: + """Return the state of the replication applier. + + Returns: + dict: State of the replication applier. + + Raises: + ReplicationApplierStateError: If retrieval fails. + + References: + - `get-the-replication-applier-state `__ + """ # noqa: E501 + request = Request( + method=Method.GET, + endpoint="/_api/replication/applier-state", + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ReplicationApplierStateError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def server_id(self) -> Result[str]: + """Return the current server's ID. + + Returns: + str: Server ID. + + Raises: + ReplicationServerIDError: If retrieval fails. + + References: + - `get-the-replication-server-id `__ + """ # noqa: E501 + request = Request( + method=Method.GET, + endpoint="/_api/replication/server-id", + ) + + def response_handler(resp: Response) -> str: + if not resp.is_success: + raise ReplicationServerIDError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return str(result["serverId"]) + + return await self._executor.execute(request, response_handler) diff --git a/docs/migration.rst b/docs/migration.rst index 7c2427e..0353a0d 100644 --- a/docs/migration.rst +++ b/docs/migration.rst @@ -51,7 +51,7 @@ this is not always consistent. 
The asynchronous driver, however, tries to stick to a simple rule: -* If the API returns a camel case key, it will be returned as is. +* If the API returns a camel case key, it will be returned as is. The response is returned from the server as is. * Parameters passed from client to server use the snake case equivalent of the camel case keys required by the API (e.g. `userName` becomes `user_name`). This is done to ensure PEP8 compatibility. @@ -74,6 +74,13 @@ Serialization Check out the :ref:`Serialization` section to learn more about how to implement your own serializer/deserializer. The current driver makes use of generic types and allows for a higher degree of customization. +Replication +=========== + +Although a minimal replication API is available for observability purposes, its use is not recommended. +Most of these are internal APIs that are not meant to be used by the end user. If you need to make any changes +to replication, please do so from the cluster web interface. + Mixing sync and async ===================== diff --git a/tests/test_database.py b/tests/test_database.py index 425007b..33dcc56 100644 --- a/tests/test_database.py +++ b/tests/test_database.py @@ -20,6 +20,13 @@ DatabaseSupportInfoError, JWTSecretListError, JWTSecretReloadError, + ReplicationApplierConfigError, + ReplicationApplierStateError, + ReplicationClusterInventoryError, + ReplicationDumpError, + ReplicationInventoryError, + ReplicationLoggerStateError, + ReplicationServerIDError, ServerApiCallsError, ServerAvailableOptionsGetError, ServerCheckAvailabilityError, @@ -190,6 +197,36 @@ async def test_metrics(db, bad_db): assert isinstance(metrics, str) +@pytest.mark.asyncio +async def test_replication(db, bad_db, cluster): + with pytest.raises(ReplicationInventoryError): + await bad_db.replication.inventory("id") + with pytest.raises(ReplicationDumpError): + await bad_db.replication.dump("test_collection") + if cluster: + with pytest.raises(ReplicationClusterInventoryError): + await 
bad_db.replication.cluster_inventory() + result = await db.replication.cluster_inventory() + assert isinstance(result, dict) + if not cluster: + with pytest.raises(ReplicationLoggerStateError): + await bad_db.replication.logger_state() + result = await db.replication.logger_state() + assert isinstance(result, dict) + with pytest.raises(ReplicationApplierConfigError): + await bad_db.replication.applier_config() + result = await db.replication.applier_config() + assert isinstance(result, dict) + with pytest.raises(ReplicationApplierStateError): + await bad_db.replication.applier_state() + result = await db.replication.applier_state() + assert isinstance(result, dict) + with pytest.raises(ReplicationServerIDError): + await bad_db.replication.server_id() + result = await db.replication.server_id() + assert isinstance(result, str) + + @pytest.mark.asyncio async def test_logs(sys_db, bad_db): with pytest.raises(ServerReadLogError): From 12530de10b9c1244d06c5002ad5dd35d9279b60c Mon Sep 17 00:00:00 2001 From: Alex Petenchea Date: Mon, 18 Aug 2025 06:49:52 +0000 Subject: [PATCH 14/14] Version 1.0.0 --- CONTRIBUTING.md | 2 +- README.md | 2 +- arangoasync/version.py | 2 +- docs/index.rst | 3 +-- 4 files changed, 4 insertions(+), 5 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 375d8b0..66044c4 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -10,7 +10,7 @@ pre-commit install # Install git pre-commit hooks Run unit tests with coverage: ```shell -pytest --cov=arango --cov-report=html # Open htmlcov/index.html in your browser +pytest --enterprise --cluster --cov=arango --cov-report=html # Open htmlcov/index.html in your browser ``` To start and ArangoDB instance locally, run: diff --git a/README.md b/README.md index ab24eae..b80d633 100644 --- a/README.md +++ b/README.md @@ -20,7 +20,7 @@ database natively supporting documents, graphs and search. This is the _asyncio_ alternative of the [python-arango](https://github.com/arangodb/python-arango) driver. 
-**Note: This project is still in active development, features might be added or removed.**
+Check out a demo app at [python-arango-async-demo](https://github.com/apetenchea/python-arango-async-demo).
 
 ## Requirements
 
diff --git a/arangoasync/version.py b/arangoasync/version.py
index b1a19e3..5becc17 100644
--- a/arangoasync/version.py
+++ b/arangoasync/version.py
@@ -1 +1 @@
-__version__ = "0.0.5"
+__version__ = "1.0.0"
diff --git a/docs/index.rst b/docs/index.rst
index 0fab3ac..52714c3 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -6,8 +6,7 @@ python-arango-async
 -------------------
 
 Welcome to the documentation for python-arango-async_, a Python driver for ArangoDB_.
-
-**Note: This project is still in active development, features might be added or removed.**
+You can check out a demo app at `python-arango-async-demo <https://github.com/apetenchea/python-arango-async-demo>`_.
 
 Requirements
 =============