From 0e812b3963823377948322aa573492db5c9cd9b9 Mon Sep 17 00:00:00 2001 From: Matthew McNeely Date: Thu, 4 Sep 2025 18:10:53 -0400 Subject: [PATCH 01/10] Update trunk config --- .trunk/trunk.yaml | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/.trunk/trunk.yaml b/.trunk/trunk.yaml index 4445bac..de8b3a3 100644 --- a/.trunk/trunk.yaml +++ b/.trunk/trunk.yaml @@ -2,18 +2,18 @@ # To learn more about the format of this file, see https://docs.trunk.io/reference/trunk-yaml version: 0.1 cli: - version: 1.22.10 + version: 1.25.0 # Trunk provides extensibility via plugins. (https://docs.trunk.io/plugins) plugins: sources: - id: trunk - ref: v1.6.7 + ref: v1.7.2 uri: https://github.com/trunk-io/plugins # Many linters and tools depend on runtimes - configure them here. (https://docs.trunk.io/runtimes) runtimes: enabled: - go@1.21.0 - - node@18.20.5 + - node@22.16.0 - python@3.10.8 # This is the section where you manage your linters. (https://docs.trunk.io/check/configuration) lint: @@ -23,23 +23,23 @@ lint: - pydgraph/proto/api_pb*.py enabled: - - trivy@0.59.1 + - trivy@0.66.0 - trivy@0.58.2 - - renovate@39.161.0 + - renovate@41.96.1 - actionlint@1.7.7 - - bandit@1.8.2 + - bandit@1.8.6 - black@25.1.0 - - checkov@3.2.365 + - checkov@3.2.469 - git-diff-check - - isort@6.0.0 - - markdownlint@0.44.0 - - prettier@3.4.2 - - ruff@0.9.4 - - shellcheck@0.10.0 + - isort@6.0.1 + - markdownlint@0.45.0 + - prettier@3.6.2 + - ruff@0.12.11 + - shellcheck@0.11.0 - shfmt@3.6.0 - - taplo@0.9.3 - - trufflehog@3.88.4 - - yamllint@1.35.1 + - taplo@0.10.0 + - trufflehog@3.90.5 + - yamllint@1.37.1 actions: enabled: - trunk-announce From 959cc1ac72adfd9ec29a4b19bf67633e94f36bae Mon Sep 17 00:00:00 2001 From: Matthew McNeely Date: Thu, 4 Sep 2025 18:11:35 -0400 Subject: [PATCH 02/10] Convert whitelist to open/all ipv4 addresses Future proof docker network changes --- examples/tls/docker-compose.yml | 12 ++++++------ tests/docker-compose.yml | 9 
+++------ 2 files changed, 9 insertions(+), 12 deletions(-) diff --git a/examples/tls/docker-compose.yml b/examples/tls/docker-compose.yml index c6e8c9b..9bc1d31 100644 --- a/examples/tls/docker-compose.yml +++ b/examples/tls/docker-compose.yml @@ -67,8 +67,8 @@ services: command: dgraph alpha --port_offset 0 --my=alpha1:7080 --zero=zero1:5080 --expose_trace --trace ratio=1.0 --profile_mode block --block_rate 10 --logtostderr -v=2 --security - whitelist=10.0.0.0/8,172.16.0.0/12,192.168.0.0/16 --acl - "secret-file=/dgraph-acl/hmac-secret;access-ttl=3s;refresh-ttl=5s" --tls + whitelist=0.0.0.0/0 --acl "secret-file=/dgraph-acl/hmac-secret;access-ttl=3s;refresh-ttl=5s" + --tls "client-auth-type=REQUIREANDVERIFY;ca-cert=/dgraph-tls/ca.crt;server-cert=/dgraph-tls/node.crt;server-key=/dgraph-tls/node.key" alpha2: @@ -95,8 +95,8 @@ services: command: dgraph alpha --port_offset 2 --my=alpha2:7082 --zero=zero1:5080 --expose_trace --trace ratio=1.0 --profile_mode block --block_rate 10 --logtostderr -v=2 --security - whitelist=10.0.0.0/8,172.16.0.0/12,192.168.0.0/16 --acl - "secret-file=/dgraph-acl/hmac-secret;access-ttl=3s;refresh-ttl=5s" --tls + whitelist=0.0.0.0/0 --acl "secret-file=/dgraph-acl/hmac-secret;access-ttl=3s;refresh-ttl=5s" + --tls "client-auth-type=REQUIREANDVERIFY;ca-cert=/dgraph-tls/ca.crt;server-cert=/dgraph-tls/node.crt;server-key=/dgraph-tls/node.key" alpha3: @@ -123,6 +123,6 @@ services: command: dgraph alpha --port_offset 3 --my=alpha3:7083 --zero=zero1:5080 --expose_trace --trace ratio=1.0 --profile_mode block --block_rate 10 --logtostderr -v=2 --security - whitelist=10.0.0.0/8,172.16.0.0/12,192.168.0.0/16 --acl - "secret-file=/dgraph-acl/hmac-secret;access-ttl=3s;refresh-ttl=5s" --tls + whitelist=0.0.0.0/0 --acl "secret-file=/dgraph-acl/hmac-secret;access-ttl=3s;refresh-ttl=5s" + --tls "client-auth-type=REQUIREANDVERIFY;ca-cert=/dgraph-tls/ca.crt;server-cert=/dgraph-tls/node.crt;server-key=/dgraph-tls/node.key" diff --git a/tests/docker-compose.yml 
b/tests/docker-compose.yml index 33b6b4d..e28dfe4 100644 --- a/tests/docker-compose.yml +++ b/tests/docker-compose.yml @@ -12,8 +12,7 @@ services: read_only: true command: dgraph alpha --my=alpha1:7080 --zero=zero1:5080 --logtostderr -v=2 --raft "idx=1; group=1" - --security "whitelist=10.0.0.0/8,172.16.0.0/12,192.168.0.0/16;" --acl - "secret-file=/secret/hmac;" + --security "whitelist=0.0.0.0/0;" --acl "secret-file=/secret/hmac;" alpha2: image: dgraph/dgraph:${DGRAPH_IMAGE_TAG:-latest} @@ -28,8 +27,7 @@ services: read_only: true command: dgraph alpha --my=alpha2:7080 --zero=zero1:5080 --logtostderr -v=2 --raft "idx=2; group=1" - --security "whitelist=10.0.0.0/8,172.16.0.0/12,192.168.0.0/16;" --acl - "secret-file=/secret/hmac;" + --security "whitelist=0.0.0.0/0;" --acl "secret-file=/secret/hmac;" alpha3: image: dgraph/dgraph:${DGRAPH_IMAGE_TAG:-latest} @@ -44,8 +42,7 @@ services: read_only: true command: dgraph alpha --my=alpha3:7080 --zero=zero1:5080 --logtostderr -v=2 --raft "idx=3; group=1" - --security "whitelist=10.0.0.0/8,172.16.0.0/12,192.168.0.0/16;" --acl - "secret-file=/secret/hmac;" + --security "whitelist=0.0.0.0/0;" --acl "secret-file=/secret/hmac;" zero1: image: dgraph/dgraph:${DGRAPH_IMAGE_TAG:-latest} From 08b2c207ac557f0d606bc31df0bb7e49396251a6 Mon Sep 17 00:00:00 2001 From: Matthew McNeely Date: Thu, 4 Sep 2025 18:11:50 -0400 Subject: [PATCH 03/10] Update proto and generated --- pydgraph/proto/api.proto | 81 ++++++++- pydgraph/proto/api_pb2.py | 48 +++++- pydgraph/proto/api_pb2_grpc.py | 301 +++++++++++++++++++++++++++++++++ 3 files changed, 425 insertions(+), 5 deletions(-) diff --git a/pydgraph/proto/api.proto b/pydgraph/proto/api.proto index 78df31b..4949b7f 100644 --- a/pydgraph/proto/api.proto +++ b/pydgraph/proto/api.proto @@ -12,7 +12,7 @@ syntax = "proto3"; package api; -option go_package = "github.com/dgraph-io/dgo/v240/protos/api"; +option go_package = "github.com/dgraph-io/dgo/v250/protos/api"; option java_package = "io.dgraph"; option 
java_outer_classname = "DgraphProto"; @@ -24,6 +24,16 @@ service Dgraph { rpc Alter (Operation) returns (Payload) {} rpc CommitOrAbort (TxnContext) returns (TxnContext) {} rpc CheckVersion(Check) returns (Version) {} + + rpc RunDQL(RunDQLRequest) returns (Response) {} + rpc AllocateIDs(AllocateIDsRequest) returns (AllocateIDsResponse) {} + + rpc UpdateExtSnapshotStreamingState(UpdateExtSnapshotStreamingStateRequest) returns (UpdateExtSnapshotStreamingStateResponse) {} + rpc StreamExtSnapshot(stream StreamExtSnapshotRequest) returns (StreamExtSnapshotResponse) {} + + rpc CreateNamespace(CreateNamespaceRequest) returns (CreateNamespaceResponse) {} + rpc DropNamespace(DropNamespaceRequest) returns (DropNamespaceResponse) {} + rpc ListNamespaces(ListNamespacesRequest) returns (ListNamespacesResponse) {} } message Request { @@ -192,4 +202,73 @@ message Jwt { string refresh_jwt = 2; } +message RunDQLRequest { + string dql_query = 1; + map vars = 2; + bool read_only = 3; + bool best_effort = 4; + Request.RespFormat resp_format = 5; +} + +enum LeaseType { + NS = 0; + UID = 1; + TS = 2; +} + +message AllocateIDsRequest { + uint64 how_many = 1; + LeaseType lease_type = 2; +} + +message AllocateIDsResponse { + uint64 start = 1; + uint64 end = 2; // inclusive +} + +message CreateNamespaceRequest {} + +message CreateNamespaceResponse { + uint64 namespace = 1; +} + +message DropNamespaceRequest { + uint64 namespace = 1; +} + +message DropNamespaceResponse {} + +message ListNamespacesRequest {} + +message ListNamespacesResponse { + map namespaces = 1; +} + +message Namespace { + uint64 id = 1; +} + +message UpdateExtSnapshotStreamingStateRequest { + bool start = 1; + bool finish = 2; + bool drop_data = 3; +} + +message UpdateExtSnapshotStreamingStateResponse { + repeated uint32 groups = 1; +} + +message StreamExtSnapshotRequest { + uint32 group_id = 1; + bool forward = 2; + StreamPacket pkt = 3; +} + +message StreamExtSnapshotResponse {} + +message StreamPacket { + bytes data = 
1; + bool done = 2; +} + // vim: noexpandtab sw=2 ts=2 diff --git a/pydgraph/proto/api_pb2.py b/pydgraph/proto/api_pb2.py index 413f23b..d09e131 100644 --- a/pydgraph/proto/api_pb2.py +++ b/pydgraph/proto/api_pb2.py @@ -14,14 +14,14 @@ -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\tapi.proto\x12\x03\x61pi\"\xb8\x02\n\x07Request\x12\x10\n\x08start_ts\x18\x01 \x01(\x04\x12\r\n\x05query\x18\x04 \x01(\t\x12$\n\x04vars\x18\x05 \x03(\x0b\x32\x16.api.Request.VarsEntry\x12\x11\n\tread_only\x18\x06 \x01(\x08\x12\x13\n\x0b\x62\x65st_effort\x18\x07 \x01(\x08\x12 \n\tmutations\x18\x0c \x03(\x0b\x32\r.api.Mutation\x12\x12\n\ncommit_now\x18\r \x01(\x08\x12,\n\x0bresp_format\x18\x0e \x01(\x0e\x32\x17.api.Request.RespFormat\x12\x0c\n\x04hash\x18\x0f \x01(\t\x1a+\n\tVarsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x1f\n\nRespFormat\x12\x08\n\x04JSON\x10\x00\x12\x07\n\x03RDF\x10\x01\"\x14\n\x04Uids\x12\x0c\n\x04uids\x18\x01 \x03(\t\"\x1d\n\x0cListOfString\x12\r\n\x05value\x18\x01 \x03(\t\"\xbc\x02\n\x08Response\x12\x0c\n\x04json\x18\x01 \x01(\x0c\x12\x1c\n\x03txn\x18\x02 \x01(\x0b\x32\x0f.api.TxnContext\x12\x1d\n\x07latency\x18\x03 \x01(\x0b\x32\x0c.api.Latency\x12\x1d\n\x07metrics\x18\x04 \x01(\x0b\x32\x0c.api.Metrics\x12%\n\x04uids\x18\x0c \x03(\x0b\x32\x17.api.Response.UidsEntry\x12\x0b\n\x03rdf\x18\r \x01(\x0c\x12%\n\x04hdrs\x18\x0e \x03(\x0b\x32\x17.api.Response.HdrsEntry\x1a+\n\tUidsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a>\n\tHdrsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12 \n\x05value\x18\x02 \x01(\x0b\x32\x11.api.ListOfString:\x02\x38\x01\"\xad\x01\n\x08Mutation\x12\x10\n\x08set_json\x18\x01 \x01(\x0c\x12\x13\n\x0b\x64\x65lete_json\x18\x02 \x01(\x0c\x12\x12\n\nset_nquads\x18\x03 \x01(\x0c\x12\x12\n\ndel_nquads\x18\x04 \x01(\x0c\x12\x17\n\x03set\x18\x05 \x03(\x0b\x32\n.api.NQuad\x12\x17\n\x03\x64\x65l\x18\x06 \x03(\x0b\x32\n.api.NQuad\x12\x0c\n\x04\x63ond\x18\t 
\x01(\t\x12\x12\n\ncommit_now\x18\x0e \x01(\x08\"\xd2\x01\n\tOperation\x12\x0e\n\x06schema\x18\x01 \x01(\t\x12\x11\n\tdrop_attr\x18\x02 \x01(\t\x12\x10\n\x08\x64rop_all\x18\x03 \x01(\x08\x12&\n\x07\x64rop_op\x18\x04 \x01(\x0e\x32\x15.api.Operation.DropOp\x12\x12\n\ndrop_value\x18\x05 \x01(\t\x12\x19\n\x11run_in_background\x18\x06 \x01(\x08\"9\n\x06\x44ropOp\x12\x08\n\x04NONE\x10\x00\x12\x07\n\x03\x41LL\x10\x01\x12\x08\n\x04\x44\x41TA\x10\x02\x12\x08\n\x04\x41TTR\x10\x03\x12\x08\n\x04TYPE\x10\x04\"\x17\n\x07Payload\x12\x0c\n\x04\x44\x61ta\x18\x01 \x01(\x0c\"m\n\nTxnContext\x12\x10\n\x08start_ts\x18\x01 \x01(\x04\x12\x11\n\tcommit_ts\x18\x02 \x01(\x04\x12\x0f\n\x07\x61\x62orted\x18\x03 \x01(\x08\x12\x0c\n\x04keys\x18\x04 \x03(\t\x12\r\n\x05preds\x18\x05 \x03(\t\x12\x0c\n\x04hash\x18\x06 \x01(\t\"\x07\n\x05\x43heck\"\x16\n\x07Version\x12\x0b\n\x03tag\x18\x01 \x01(\t\"x\n\x07Latency\x12\x12\n\nparsing_ns\x18\x01 \x01(\x04\x12\x15\n\rprocessing_ns\x18\x02 \x01(\x04\x12\x13\n\x0b\x65ncoding_ns\x18\x03 \x01(\x04\x12\x1b\n\x13\x61ssign_timestamp_ns\x18\x04 \x01(\x04\x12\x10\n\x08total_ns\x18\x05 \x01(\x04\"f\n\x07Metrics\x12+\n\x08num_uids\x18\x01 \x03(\x0b\x32\x19.api.Metrics.NumUidsEntry\x1a.\n\x0cNumUidsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x04:\x02\x38\x01\"\xa3\x01\n\x05NQuad\x12\x0f\n\x07subject\x18\x01 \x01(\t\x12\x11\n\tpredicate\x18\x02 \x01(\t\x12\x11\n\tobject_id\x18\x03 \x01(\t\x12 \n\x0cobject_value\x18\x04 \x01(\x0b\x32\n.api.Value\x12\x0c\n\x04lang\x18\x06 \x01(\t\x12\x1a\n\x06\x66\x61\x63\x65ts\x18\x07 \x03(\x0b\x32\n.api.Facet\x12\x11\n\tnamespace\x18\x08 \x01(\x04J\x04\x08\x05\x10\x06\"\xa4\x02\n\x05Value\x12\x15\n\x0b\x64\x65\x66\x61ult_val\x18\x01 \x01(\tH\x00\x12\x13\n\tbytes_val\x18\x02 \x01(\x0cH\x00\x12\x11\n\x07int_val\x18\x03 \x01(\x03H\x00\x12\x12\n\x08\x62ool_val\x18\x04 \x01(\x08H\x00\x12\x11\n\x07str_val\x18\x05 \x01(\tH\x00\x12\x14\n\ndouble_val\x18\x06 \x01(\x01H\x00\x12\x11\n\x07geo_val\x18\x07 
\x01(\x0cH\x00\x12\x12\n\x08\x64\x61te_val\x18\x08 \x01(\x0cH\x00\x12\x16\n\x0c\x64\x61tetime_val\x18\t \x01(\x0cH\x00\x12\x16\n\x0cpassword_val\x18\n \x01(\tH\x00\x12\x11\n\x07uid_val\x18\x0b \x01(\x04H\x00\x12\x16\n\x0c\x62igfloat_val\x18\x0c \x01(\x0cH\x00\x12\x16\n\x0cvfloat32_val\x18\r \x01(\x0cH\x00\x42\x05\n\x03val\"\xab\x01\n\x05\x46\x61\x63\x65t\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x0c\x12$\n\x08val_type\x18\x03 \x01(\x0e\x32\x12.api.Facet.ValType\x12\x0e\n\x06tokens\x18\x04 \x03(\t\x12\r\n\x05\x61lias\x18\x05 \x01(\t\"A\n\x07ValType\x12\n\n\x06STRING\x10\x00\x12\x07\n\x03INT\x10\x01\x12\t\n\x05\x46LOAT\x10\x02\x12\x08\n\x04\x42OOL\x10\x03\x12\x0c\n\x08\x44\x41TETIME\x10\x04\"Z\n\x0cLoginRequest\x12\x0e\n\x06userid\x18\x01 \x01(\t\x12\x10\n\x08password\x18\x02 \x01(\t\x12\x15\n\rrefresh_token\x18\x03 \x01(\t\x12\x11\n\tnamespace\x18\x04 \x01(\x04\".\n\x03Jwt\x12\x12\n\naccess_jwt\x18\x01 \x01(\t\x12\x13\n\x0brefresh_jwt\x18\x02 \x01(\t2\xe7\x01\n\x06\x44graph\x12+\n\x05Login\x12\x11.api.LoginRequest\x1a\r.api.Response\"\x00\x12&\n\x05Query\x12\x0c.api.Request\x1a\r.api.Response\"\x00\x12\'\n\x05\x41lter\x12\x0e.api.Operation\x1a\x0c.api.Payload\"\x00\x12\x33\n\rCommitOrAbort\x12\x0f.api.TxnContext\x1a\x0f.api.TxnContext\"\x00\x12*\n\x0c\x43heckVersion\x12\n.api.Check\x1a\x0c.api.Version\"\x00\x42\x42\n\tio.dgraphB\x0b\x44graphProtoZ(github.com/dgraph-io/dgo/v240/protos/apib\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\tapi.proto\x12\x03\x61pi\"\xb8\x02\n\x07Request\x12\x10\n\x08start_ts\x18\x01 \x01(\x04\x12\r\n\x05query\x18\x04 \x01(\t\x12$\n\x04vars\x18\x05 \x03(\x0b\x32\x16.api.Request.VarsEntry\x12\x11\n\tread_only\x18\x06 \x01(\x08\x12\x13\n\x0b\x62\x65st_effort\x18\x07 \x01(\x08\x12 \n\tmutations\x18\x0c \x03(\x0b\x32\r.api.Mutation\x12\x12\n\ncommit_now\x18\r \x01(\x08\x12,\n\x0bresp_format\x18\x0e \x01(\x0e\x32\x17.api.Request.RespFormat\x12\x0c\n\x04hash\x18\x0f 
\x01(\t\x1a+\n\tVarsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x1f\n\nRespFormat\x12\x08\n\x04JSON\x10\x00\x12\x07\n\x03RDF\x10\x01\"\x14\n\x04Uids\x12\x0c\n\x04uids\x18\x01 \x03(\t\"\x1d\n\x0cListOfString\x12\r\n\x05value\x18\x01 \x03(\t\"\xbc\x02\n\x08Response\x12\x0c\n\x04json\x18\x01 \x01(\x0c\x12\x1c\n\x03txn\x18\x02 \x01(\x0b\x32\x0f.api.TxnContext\x12\x1d\n\x07latency\x18\x03 \x01(\x0b\x32\x0c.api.Latency\x12\x1d\n\x07metrics\x18\x04 \x01(\x0b\x32\x0c.api.Metrics\x12%\n\x04uids\x18\x0c \x03(\x0b\x32\x17.api.Response.UidsEntry\x12\x0b\n\x03rdf\x18\r \x01(\x0c\x12%\n\x04hdrs\x18\x0e \x03(\x0b\x32\x17.api.Response.HdrsEntry\x1a+\n\tUidsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a>\n\tHdrsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12 \n\x05value\x18\x02 \x01(\x0b\x32\x11.api.ListOfString:\x02\x38\x01\"\xad\x01\n\x08Mutation\x12\x10\n\x08set_json\x18\x01 \x01(\x0c\x12\x13\n\x0b\x64\x65lete_json\x18\x02 \x01(\x0c\x12\x12\n\nset_nquads\x18\x03 \x01(\x0c\x12\x12\n\ndel_nquads\x18\x04 \x01(\x0c\x12\x17\n\x03set\x18\x05 \x03(\x0b\x32\n.api.NQuad\x12\x17\n\x03\x64\x65l\x18\x06 \x03(\x0b\x32\n.api.NQuad\x12\x0c\n\x04\x63ond\x18\t \x01(\t\x12\x12\n\ncommit_now\x18\x0e \x01(\x08\"\xd2\x01\n\tOperation\x12\x0e\n\x06schema\x18\x01 \x01(\t\x12\x11\n\tdrop_attr\x18\x02 \x01(\t\x12\x10\n\x08\x64rop_all\x18\x03 \x01(\x08\x12&\n\x07\x64rop_op\x18\x04 \x01(\x0e\x32\x15.api.Operation.DropOp\x12\x12\n\ndrop_value\x18\x05 \x01(\t\x12\x19\n\x11run_in_background\x18\x06 \x01(\x08\"9\n\x06\x44ropOp\x12\x08\n\x04NONE\x10\x00\x12\x07\n\x03\x41LL\x10\x01\x12\x08\n\x04\x44\x41TA\x10\x02\x12\x08\n\x04\x41TTR\x10\x03\x12\x08\n\x04TYPE\x10\x04\"\x17\n\x07Payload\x12\x0c\n\x04\x44\x61ta\x18\x01 \x01(\x0c\"m\n\nTxnContext\x12\x10\n\x08start_ts\x18\x01 \x01(\x04\x12\x11\n\tcommit_ts\x18\x02 \x01(\x04\x12\x0f\n\x07\x61\x62orted\x18\x03 \x01(\x08\x12\x0c\n\x04keys\x18\x04 \x03(\t\x12\r\n\x05preds\x18\x05 
\x03(\t\x12\x0c\n\x04hash\x18\x06 \x01(\t\"\x07\n\x05\x43heck\"\x16\n\x07Version\x12\x0b\n\x03tag\x18\x01 \x01(\t\"x\n\x07Latency\x12\x12\n\nparsing_ns\x18\x01 \x01(\x04\x12\x15\n\rprocessing_ns\x18\x02 \x01(\x04\x12\x13\n\x0b\x65ncoding_ns\x18\x03 \x01(\x04\x12\x1b\n\x13\x61ssign_timestamp_ns\x18\x04 \x01(\x04\x12\x10\n\x08total_ns\x18\x05 \x01(\x04\"f\n\x07Metrics\x12+\n\x08num_uids\x18\x01 \x03(\x0b\x32\x19.api.Metrics.NumUidsEntry\x1a.\n\x0cNumUidsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x04:\x02\x38\x01\"\xa3\x01\n\x05NQuad\x12\x0f\n\x07subject\x18\x01 \x01(\t\x12\x11\n\tpredicate\x18\x02 \x01(\t\x12\x11\n\tobject_id\x18\x03 \x01(\t\x12 \n\x0cobject_value\x18\x04 \x01(\x0b\x32\n.api.Value\x12\x0c\n\x04lang\x18\x06 \x01(\t\x12\x1a\n\x06\x66\x61\x63\x65ts\x18\x07 \x03(\x0b\x32\n.api.Facet\x12\x11\n\tnamespace\x18\x08 \x01(\x04J\x04\x08\x05\x10\x06\"\xa4\x02\n\x05Value\x12\x15\n\x0b\x64\x65\x66\x61ult_val\x18\x01 \x01(\tH\x00\x12\x13\n\tbytes_val\x18\x02 \x01(\x0cH\x00\x12\x11\n\x07int_val\x18\x03 \x01(\x03H\x00\x12\x12\n\x08\x62ool_val\x18\x04 \x01(\x08H\x00\x12\x11\n\x07str_val\x18\x05 \x01(\tH\x00\x12\x14\n\ndouble_val\x18\x06 \x01(\x01H\x00\x12\x11\n\x07geo_val\x18\x07 \x01(\x0cH\x00\x12\x12\n\x08\x64\x61te_val\x18\x08 \x01(\x0cH\x00\x12\x16\n\x0c\x64\x61tetime_val\x18\t \x01(\x0cH\x00\x12\x16\n\x0cpassword_val\x18\n \x01(\tH\x00\x12\x11\n\x07uid_val\x18\x0b \x01(\x04H\x00\x12\x16\n\x0c\x62igfloat_val\x18\x0c \x01(\x0cH\x00\x12\x16\n\x0cvfloat32_val\x18\r \x01(\x0cH\x00\x42\x05\n\x03val\"\xab\x01\n\x05\x46\x61\x63\x65t\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x0c\x12$\n\x08val_type\x18\x03 \x01(\x0e\x32\x12.api.Facet.ValType\x12\x0e\n\x06tokens\x18\x04 \x03(\t\x12\r\n\x05\x61lias\x18\x05 \x01(\t\"A\n\x07ValType\x12\n\n\x06STRING\x10\x00\x12\x07\n\x03INT\x10\x01\x12\t\n\x05\x46LOAT\x10\x02\x12\x08\n\x04\x42OOL\x10\x03\x12\x0c\n\x08\x44\x41TETIME\x10\x04\"Z\n\x0cLoginRequest\x12\x0e\n\x06userid\x18\x01 
\x01(\t\x12\x10\n\x08password\x18\x02 \x01(\t\x12\x15\n\rrefresh_token\x18\x03 \x01(\t\x12\x11\n\tnamespace\x18\x04 \x01(\x04\".\n\x03Jwt\x12\x12\n\naccess_jwt\x18\x01 \x01(\t\x12\x13\n\x0brefresh_jwt\x18\x02 \x01(\t\"\xd1\x01\n\rRunDQLRequest\x12\x11\n\tdql_query\x18\x01 \x01(\t\x12*\n\x04vars\x18\x02 \x03(\x0b\x32\x1c.api.RunDQLRequest.VarsEntry\x12\x11\n\tread_only\x18\x03 \x01(\x08\x12\x13\n\x0b\x62\x65st_effort\x18\x04 \x01(\x08\x12,\n\x0bresp_format\x18\x05 \x01(\x0e\x32\x17.api.Request.RespFormat\x1a+\n\tVarsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"J\n\x12\x41llocateIDsRequest\x12\x10\n\x08how_many\x18\x01 \x01(\x04\x12\"\n\nlease_type\x18\x02 \x01(\x0e\x32\x0e.api.LeaseType\"1\n\x13\x41llocateIDsResponse\x12\r\n\x05start\x18\x01 \x01(\x04\x12\x0b\n\x03\x65nd\x18\x02 \x01(\x04\"\x18\n\x16\x43reateNamespaceRequest\",\n\x17\x43reateNamespaceResponse\x12\x11\n\tnamespace\x18\x01 \x01(\x04\")\n\x14\x44ropNamespaceRequest\x12\x11\n\tnamespace\x18\x01 \x01(\x04\"\x17\n\x15\x44ropNamespaceResponse\"\x17\n\x15ListNamespacesRequest\"\x9c\x01\n\x16ListNamespacesResponse\x12?\n\nnamespaces\x18\x01 \x03(\x0b\x32+.api.ListNamespacesResponse.NamespacesEntry\x1a\x41\n\x0fNamespacesEntry\x12\x0b\n\x03key\x18\x01 \x01(\x04\x12\x1d\n\x05value\x18\x02 \x01(\x0b\x32\x0e.api.Namespace:\x02\x38\x01\"\x17\n\tNamespace\x12\n\n\x02id\x18\x01 \x01(\x04\"Z\n&UpdateExtSnapshotStreamingStateRequest\x12\r\n\x05start\x18\x01 \x01(\x08\x12\x0e\n\x06\x66inish\x18\x02 \x01(\x08\x12\x11\n\tdrop_data\x18\x03 \x01(\x08\"9\n\'UpdateExtSnapshotStreamingStateResponse\x12\x0e\n\x06groups\x18\x01 \x03(\r\"]\n\x18StreamExtSnapshotRequest\x12\x10\n\x08group_id\x18\x01 \x01(\r\x12\x0f\n\x07\x66orward\x18\x02 \x01(\x08\x12\x1e\n\x03pkt\x18\x03 \x01(\x0b\x32\x11.api.StreamPacket\"\x1b\n\x19StreamExtSnapshotResponse\"*\n\x0cStreamPacket\x12\x0c\n\x04\x64\x61ta\x18\x01 \x01(\x0c\x12\x0c\n\x04\x64one\x18\x02 
\x01(\x08*$\n\tLeaseType\x12\x06\n\x02NS\x10\x00\x12\x07\n\x03UID\x10\x01\x12\x06\n\x02TS\x10\x02\x32\x99\x06\n\x06\x44graph\x12+\n\x05Login\x12\x11.api.LoginRequest\x1a\r.api.Response\"\x00\x12&\n\x05Query\x12\x0c.api.Request\x1a\r.api.Response\"\x00\x12\'\n\x05\x41lter\x12\x0e.api.Operation\x1a\x0c.api.Payload\"\x00\x12\x33\n\rCommitOrAbort\x12\x0f.api.TxnContext\x1a\x0f.api.TxnContext\"\x00\x12*\n\x0c\x43heckVersion\x12\n.api.Check\x1a\x0c.api.Version\"\x00\x12-\n\x06RunDQL\x12\x12.api.RunDQLRequest\x1a\r.api.Response\"\x00\x12\x42\n\x0b\x41llocateIDs\x12\x17.api.AllocateIDsRequest\x1a\x18.api.AllocateIDsResponse\"\x00\x12~\n\x1fUpdateExtSnapshotStreamingState\x12+.api.UpdateExtSnapshotStreamingStateRequest\x1a,.api.UpdateExtSnapshotStreamingStateResponse\"\x00\x12V\n\x11StreamExtSnapshot\x12\x1d.api.StreamExtSnapshotRequest\x1a\x1e.api.StreamExtSnapshotResponse\"\x00(\x01\x12N\n\x0f\x43reateNamespace\x12\x1b.api.CreateNamespaceRequest\x1a\x1c.api.CreateNamespaceResponse\"\x00\x12H\n\rDropNamespace\x12\x19.api.DropNamespaceRequest\x1a\x1a.api.DropNamespaceResponse\"\x00\x12K\n\x0eListNamespaces\x12\x1a.api.ListNamespacesRequest\x1a\x1b.api.ListNamespacesResponse\"\x00\x42\x42\n\tio.dgraphB\x0b\x44graphProtoZ(github.com/dgraph-io/dgo/v250/protos/apib\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'api_pb2', _globals) if not _descriptor._USE_C_DESCRIPTORS: _globals['DESCRIPTOR']._loaded_options = None - _globals['DESCRIPTOR']._serialized_options = b'\n\tio.dgraphB\013DgraphProtoZ(github.com/dgraph-io/dgo/v240/protos/api' + _globals['DESCRIPTOR']._serialized_options = b'\n\tio.dgraphB\013DgraphProtoZ(github.com/dgraph-io/dgo/v250/protos/api' _globals['_REQUEST_VARSENTRY']._loaded_options = None _globals['_REQUEST_VARSENTRY']._serialized_options = b'8\001' _globals['_RESPONSE_UIDSENTRY']._loaded_options = None @@ -30,6 +30,12 @@ 
_globals['_RESPONSE_HDRSENTRY']._serialized_options = b'8\001' _globals['_METRICS_NUMUIDSENTRY']._loaded_options = None _globals['_METRICS_NUMUIDSENTRY']._serialized_options = b'8\001' + _globals['_RUNDQLREQUEST_VARSENTRY']._loaded_options = None + _globals['_RUNDQLREQUEST_VARSENTRY']._serialized_options = b'8\001' + _globals['_LISTNAMESPACESRESPONSE_NAMESPACESENTRY']._loaded_options = None + _globals['_LISTNAMESPACESRESPONSE_NAMESPACESENTRY']._serialized_options = b'8\001' + _globals['_LEASETYPE']._serialized_start=3271 + _globals['_LEASETYPE']._serialized_end=3307 _globals['_REQUEST']._serialized_start=19 _globals['_REQUEST']._serialized_end=331 _globals['_REQUEST_VARSENTRY']._serialized_start=255 @@ -78,6 +84,40 @@ _globals['_LOGINREQUEST']._serialized_end=2214 _globals['_JWT']._serialized_start=2216 _globals['_JWT']._serialized_end=2262 - _globals['_DGRAPH']._serialized_start=2265 - _globals['_DGRAPH']._serialized_end=2496 + _globals['_RUNDQLREQUEST']._serialized_start=2265 + _globals['_RUNDQLREQUEST']._serialized_end=2474 + _globals['_RUNDQLREQUEST_VARSENTRY']._serialized_start=255 + _globals['_RUNDQLREQUEST_VARSENTRY']._serialized_end=298 + _globals['_ALLOCATEIDSREQUEST']._serialized_start=2476 + _globals['_ALLOCATEIDSREQUEST']._serialized_end=2550 + _globals['_ALLOCATEIDSRESPONSE']._serialized_start=2552 + _globals['_ALLOCATEIDSRESPONSE']._serialized_end=2601 + _globals['_CREATENAMESPACEREQUEST']._serialized_start=2603 + _globals['_CREATENAMESPACEREQUEST']._serialized_end=2627 + _globals['_CREATENAMESPACERESPONSE']._serialized_start=2629 + _globals['_CREATENAMESPACERESPONSE']._serialized_end=2673 + _globals['_DROPNAMESPACEREQUEST']._serialized_start=2675 + _globals['_DROPNAMESPACEREQUEST']._serialized_end=2716 + _globals['_DROPNAMESPACERESPONSE']._serialized_start=2718 + _globals['_DROPNAMESPACERESPONSE']._serialized_end=2741 + _globals['_LISTNAMESPACESREQUEST']._serialized_start=2743 + _globals['_LISTNAMESPACESREQUEST']._serialized_end=2766 + 
_globals['_LISTNAMESPACESRESPONSE']._serialized_start=2769 + _globals['_LISTNAMESPACESRESPONSE']._serialized_end=2925 + _globals['_LISTNAMESPACESRESPONSE_NAMESPACESENTRY']._serialized_start=2860 + _globals['_LISTNAMESPACESRESPONSE_NAMESPACESENTRY']._serialized_end=2925 + _globals['_NAMESPACE']._serialized_start=2927 + _globals['_NAMESPACE']._serialized_end=2950 + _globals['_UPDATEEXTSNAPSHOTSTREAMINGSTATEREQUEST']._serialized_start=2952 + _globals['_UPDATEEXTSNAPSHOTSTREAMINGSTATEREQUEST']._serialized_end=3042 + _globals['_UPDATEEXTSNAPSHOTSTREAMINGSTATERESPONSE']._serialized_start=3044 + _globals['_UPDATEEXTSNAPSHOTSTREAMINGSTATERESPONSE']._serialized_end=3101 + _globals['_STREAMEXTSNAPSHOTREQUEST']._serialized_start=3103 + _globals['_STREAMEXTSNAPSHOTREQUEST']._serialized_end=3196 + _globals['_STREAMEXTSNAPSHOTRESPONSE']._serialized_start=3198 + _globals['_STREAMEXTSNAPSHOTRESPONSE']._serialized_end=3225 + _globals['_STREAMPACKET']._serialized_start=3227 + _globals['_STREAMPACKET']._serialized_end=3269 + _globals['_DGRAPH']._serialized_start=3310 + _globals['_DGRAPH']._serialized_end=4103 # @@protoc_insertion_point(module_scope) diff --git a/pydgraph/proto/api_pb2_grpc.py b/pydgraph/proto/api_pb2_grpc.py index 4b689dd..4c999ec 100644 --- a/pydgraph/proto/api_pb2_grpc.py +++ b/pydgraph/proto/api_pb2_grpc.py @@ -65,6 +65,41 @@ def __init__(self, channel): request_serializer=api__pb2.Check.SerializeToString, response_deserializer=api__pb2.Version.FromString, _registered_method=True) + self.RunDQL = channel.unary_unary( + '/api.Dgraph/RunDQL', + request_serializer=api__pb2.RunDQLRequest.SerializeToString, + response_deserializer=api__pb2.Response.FromString, + _registered_method=True) + self.AllocateIDs = channel.unary_unary( + '/api.Dgraph/AllocateIDs', + request_serializer=api__pb2.AllocateIDsRequest.SerializeToString, + response_deserializer=api__pb2.AllocateIDsResponse.FromString, + _registered_method=True) + self.UpdateExtSnapshotStreamingState = 
channel.unary_unary( + '/api.Dgraph/UpdateExtSnapshotStreamingState', + request_serializer=api__pb2.UpdateExtSnapshotStreamingStateRequest.SerializeToString, + response_deserializer=api__pb2.UpdateExtSnapshotStreamingStateResponse.FromString, + _registered_method=True) + self.StreamExtSnapshot = channel.stream_unary( + '/api.Dgraph/StreamExtSnapshot', + request_serializer=api__pb2.StreamExtSnapshotRequest.SerializeToString, + response_deserializer=api__pb2.StreamExtSnapshotResponse.FromString, + _registered_method=True) + self.CreateNamespace = channel.unary_unary( + '/api.Dgraph/CreateNamespace', + request_serializer=api__pb2.CreateNamespaceRequest.SerializeToString, + response_deserializer=api__pb2.CreateNamespaceResponse.FromString, + _registered_method=True) + self.DropNamespace = channel.unary_unary( + '/api.Dgraph/DropNamespace', + request_serializer=api__pb2.DropNamespaceRequest.SerializeToString, + response_deserializer=api__pb2.DropNamespaceResponse.FromString, + _registered_method=True) + self.ListNamespaces = channel.unary_unary( + '/api.Dgraph/ListNamespaces', + request_serializer=api__pb2.ListNamespacesRequest.SerializeToString, + response_deserializer=api__pb2.ListNamespacesResponse.FromString, + _registered_method=True) class DgraphServicer(object): @@ -101,6 +136,48 @@ def CheckVersion(self, request, context): context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') + def RunDQL(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def AllocateIDs(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def 
UpdateExtSnapshotStreamingState(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def StreamExtSnapshot(self, request_iterator, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def CreateNamespace(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def DropNamespace(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ListNamespaces(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + def add_DgraphServicer_to_server(servicer, server): rpc_method_handlers = { @@ -129,6 +206,41 @@ def add_DgraphServicer_to_server(servicer, server): request_deserializer=api__pb2.Check.FromString, response_serializer=api__pb2.Version.SerializeToString, ), + 'RunDQL': grpc.unary_unary_rpc_method_handler( + servicer.RunDQL, + request_deserializer=api__pb2.RunDQLRequest.FromString, + response_serializer=api__pb2.Response.SerializeToString, + ), + 'AllocateIDs': grpc.unary_unary_rpc_method_handler( + servicer.AllocateIDs, + request_deserializer=api__pb2.AllocateIDsRequest.FromString, + 
response_serializer=api__pb2.AllocateIDsResponse.SerializeToString, + ), + 'UpdateExtSnapshotStreamingState': grpc.unary_unary_rpc_method_handler( + servicer.UpdateExtSnapshotStreamingState, + request_deserializer=api__pb2.UpdateExtSnapshotStreamingStateRequest.FromString, + response_serializer=api__pb2.UpdateExtSnapshotStreamingStateResponse.SerializeToString, + ), + 'StreamExtSnapshot': grpc.stream_unary_rpc_method_handler( + servicer.StreamExtSnapshot, + request_deserializer=api__pb2.StreamExtSnapshotRequest.FromString, + response_serializer=api__pb2.StreamExtSnapshotResponse.SerializeToString, + ), + 'CreateNamespace': grpc.unary_unary_rpc_method_handler( + servicer.CreateNamespace, + request_deserializer=api__pb2.CreateNamespaceRequest.FromString, + response_serializer=api__pb2.CreateNamespaceResponse.SerializeToString, + ), + 'DropNamespace': grpc.unary_unary_rpc_method_handler( + servicer.DropNamespace, + request_deserializer=api__pb2.DropNamespaceRequest.FromString, + response_serializer=api__pb2.DropNamespaceResponse.SerializeToString, + ), + 'ListNamespaces': grpc.unary_unary_rpc_method_handler( + servicer.ListNamespaces, + request_deserializer=api__pb2.ListNamespacesRequest.FromString, + response_serializer=api__pb2.ListNamespacesResponse.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( 'api.Dgraph', rpc_method_handlers) @@ -275,3 +387,192 @@ def CheckVersion(request, timeout, metadata, _registered_method=True) + + @staticmethod + def RunDQL(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/api.Dgraph/RunDQL', + api__pb2.RunDQLRequest.SerializeToString, + api__pb2.Response.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + 
_registered_method=True) + + @staticmethod + def AllocateIDs(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/api.Dgraph/AllocateIDs', + api__pb2.AllocateIDsRequest.SerializeToString, + api__pb2.AllocateIDsResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def UpdateExtSnapshotStreamingState(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/api.Dgraph/UpdateExtSnapshotStreamingState', + api__pb2.UpdateExtSnapshotStreamingStateRequest.SerializeToString, + api__pb2.UpdateExtSnapshotStreamingStateResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def StreamExtSnapshot(request_iterator, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.stream_unary( + request_iterator, + target, + '/api.Dgraph/StreamExtSnapshot', + api__pb2.StreamExtSnapshotRequest.SerializeToString, + api__pb2.StreamExtSnapshotResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def CreateNamespace(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + 
metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/api.Dgraph/CreateNamespace', + api__pb2.CreateNamespaceRequest.SerializeToString, + api__pb2.CreateNamespaceResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def DropNamespace(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/api.Dgraph/DropNamespace', + api__pb2.DropNamespaceRequest.SerializeToString, + api__pb2.DropNamespaceResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def ListNamespaces(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/api.Dgraph/ListNamespaces', + api__pb2.ListNamespacesRequest.SerializeToString, + api__pb2.ListNamespacesResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) From f9dbb990c2e6b45ab87d1c5b614128b14850d79e Mon Sep 17 00:00:00 2001 From: Matthew McNeely Date: Thu, 4 Sep 2025 18:12:29 -0400 Subject: [PATCH 04/10] Implement updated v1 api --- pydgraph/client.py | 365 ++++++++++++++++++++++++++++++++++++++-- pydgraph/client_stub.py | 29 ++++ 2 files changed, 382 insertions(+), 12 deletions(-) diff --git a/pydgraph/client.py b/pydgraph/client.py index 47bcf27..051a237 100755 --- a/pydgraph/client.py +++ b/pydgraph/client.py @@ -19,7 +19,7 @@ class DgraphClient(object): - 
"""Creates a new Client for interacting with the Dgraph store. + """Creates a new Client for interacting with Dgraph. The client can be backed by multiple connections (to the same server, or multiple servers in a cluster). @@ -46,6 +46,9 @@ def check_version(self, timeout=None, metadata=None, credentials=None): metadata=new_metadata, credentials=credentials, ) + import logging + + logging.warning(f"Check version response: {response.tag}") return response.tag except Exception as error: if util.is_jwt_expired(error): @@ -62,21 +65,22 @@ def check_version(self, timeout=None, metadata=None, credentials=None): raise error def login(self, userid, password, timeout=None, metadata=None, credentials=None): - login_req = api.LoginRequest() - login_req.userid = userid - login_req.password = password - login_req.namespace = 0 - - response = self.any_client().login( - login_req, timeout=timeout, metadata=metadata, credentials=credentials + """Attempts a login via this client.""" + + return self.login_into_namespace( + userid, + password, + 0, + timeout=timeout, + metadata=metadata, + credentials=credentials, ) - self._jwt = api.Jwt() - self._jwt.ParseFromString(response.json) - self._login_metadata = [("accessjwt", self._jwt.access_jwt)] def login_into_namespace( self, userid, password, namespace, timeout=None, metadata=None, credentials=None ): + """Attempts a login into a namespace via this client.""" + login_req = api.LoginRequest() login_req.userid = userid login_req.password = password @@ -90,6 +94,8 @@ def login_into_namespace( self._login_metadata = [("accessjwt", self._jwt.access_jwt)] def retry_login(self, timeout=None, metadata=None, credentials=None): + """Attempts a retry login via this client.""" + if len(self._jwt.refresh_jwt) == 0: raise ValueError("refresh jwt should not be empty") @@ -105,6 +111,7 @@ def retry_login(self, timeout=None, metadata=None, credentials=None): def alter(self, operation, timeout=None, metadata=None, credentials=None): """Runs a 
modification via this client.""" + new_metadata = self.add_login_metadata(metadata) try: @@ -142,6 +149,7 @@ def _common_except_alter(error): def async_alter(self, operation, timeout=None, metadata=None, credentials=None): """The async version of alter.""" + new_metadata = self.add_login_metadata(metadata) return self.any_client().async_alter( operation, timeout=timeout, metadata=new_metadata, credentials=credentials @@ -156,10 +164,325 @@ def handle_alter_future(future): def txn(self, read_only=False, best_effort=False): """Creates a transaction.""" + return txn.Txn(self, read_only=read_only, best_effort=best_effort) + def run_dql( + self, + dql_query, + vars=None, + read_only=False, + best_effort=False, + resp_format="JSON", + timeout=None, + metadata=None, + credentials=None, + ): + """ + Runs a DQL query or mutation via this client. + + Args: + dql_query: The DQL query string to execute + vars: Variables to substitute in the query + read_only: Whether this is a read-only query + best_effort: Whether to use best effort for read queries + resp_format: Response format, either "JSON" or "RDF" + timeout: Request timeout + metadata: Additional metadata for the request + credentials: gRPC credentials + + Returns: + Response: The query response from Dgraph + + This is only supported on Dgraph v25.0.0 and above. 
+ """ + + new_metadata = self.add_login_metadata(metadata) + + # Add explicit namespace metadata for RunDQL + # Extract namespace from JWT token if available + # TODO(Matthew): Remove this once Dgraph supports RunDQL without namespace metadata + if self._jwt.access_jwt: + import base64 + import json + + try: + # Decode JWT payload (second part after first dot) + payload_part = self._jwt.access_jwt.split(".")[1] + # Add padding if needed for base64 decoding + payload_part += "=" * (4 - len(payload_part) % 4) + payload = json.loads(base64.b64decode(payload_part)) + namespace = payload.get("namespace", 0) + new_metadata.append(("namespace", str(namespace))) + except Exception: + # If JWT decoding fails, use default namespace + new_metadata.append(("namespace", "0")) + + # Convert string format to enum if needed + if isinstance(resp_format, str): + if resp_format.upper() == "JSON": + resp_format = api.Request.RespFormat.JSON + elif resp_format.upper() == "RDF": + resp_format = api.Request.RespFormat.RDF + else: + raise ValueError( + f"Invalid resp_format: {resp_format}. Must be 'JSON' or 'RDF'" + ) + req = api.RunDQLRequest( + dql_query=dql_query, + vars=vars, + read_only=read_only, + best_effort=best_effort, + resp_format=resp_format, + ) + try: + return self.any_client().run_dql( + req, + timeout=timeout, + metadata=new_metadata, + credentials=credentials, + ) + except Exception as error: + if util.is_jwt_expired(error): + self.retry_login() + new_metadata = self.add_login_metadata(metadata) + return self.any_client().run_dql( + req, + timeout=timeout, + metadata=new_metadata, + credentials=credentials, + ) + else: + raise error + + def allocate_uids(self, how_many, timeout=None, metadata=None, credentials=None): + """ + AllocateUIDs allocates a given number of Node UIDs in the Graph and returns a start and end UIDs, + end excluded. The UIDs in the range [start, end) can then be used by the client in the mutations + going forward. 
Note that, each node in a Graph is assigned a UID in Dgraph. Dgraph ensures that + these UIDs are not allocated anywhere else throughout the operation of this cluster. This is useful + in bulk loader or live loader or similar applications. + + Args: + how_many: Number of UIDs to allocate + timeout: Request timeout + metadata: Additional metadata for the request + credentials: gRPC credentials + + Returns: + tuple: (start_uid, end_uid) where end_uid is exclusive + + This is only supported on Dgraph v25.0.0 and above. + """ + return self._allocate_ids(how_many, api.UID, timeout, metadata, credentials) + + def allocate_timestamps( + self, how_many, timeout=None, metadata=None, credentials=None + ): + """ + AllocateTimestamps gets a sequence of timestamps allocated from Dgraph. These timestamps can be + used in bulk loader and similar applications. + + Args: + how_many: Number of timestamps to allocate + timeout: Request timeout + metadata: Additional metadata for the request + credentials: gRPC credentials + + Returns: + tuple: (start_timestamp, end_timestamp) where end_timestamp is exclusive + + This is only supported on Dgraph v25.0.0 and above. + """ + return self._allocate_ids(how_many, api.TS, timeout, metadata, credentials) + + def allocate_namespaces( + self, how_many, timeout=None, metadata=None, credentials=None + ): + """ + AllocateNamespaces allocates a given number of namespaces in the Graph and returns a start and end + namespaces, end excluded. The namespaces in the range [start, end) can then be used by the client. + Dgraph ensures that these namespaces are NOT allocated anywhere else throughout the operation of + this cluster. This is useful in bulk loader or live loader or similar applications. 
+ + Args: + how_many: Number of namespaces to allocate + timeout: Request timeout + metadata: Additional metadata for the request + credentials: gRPC credentials + + Returns: + tuple: (start_namespace, end_namespace) where end_namespace is exclusive + + This is only supported on Dgraph v25.0.0 and above. + """ + return self._allocate_ids(how_many, api.NS, timeout, metadata, credentials) + + def _allocate_ids( + self, how_many, lease_type, timeout=None, metadata=None, credentials=None + ): + """ + Helper method to allocate IDs of different types (UIDs, timestamps, namespaces). + + Args: + how_many: Number of IDs to allocate + lease_type: Type of lease (api.UID, api.TS, or api.NS) + timeout: Request timeout + metadata: Additional metadata for the request + credentials: gRPC credentials + + Returns: + tuple: (start, end) where end is exclusive + """ + if how_many <= 0: + raise ValueError("how_many must be greater than 0") + new_metadata = self.add_login_metadata(metadata) + req = api.AllocateIDsRequest(how_many=how_many, lease_type=lease_type) + try: + response = self.any_client().allocate_ids( + req, + timeout=timeout, + metadata=new_metadata, + credentials=credentials, + ) + return response.start, response.end + 1 + except Exception as error: + if util.is_jwt_expired(error): + self.retry_login() + new_metadata = self.add_login_metadata(metadata) + response = self.any_client().allocate_ids( + req, + timeout=timeout, + metadata=new_metadata, + credentials=credentials, + ) + return response.start, response.end + 1 + else: + raise error + + def create_namespace(self, timeout=None, metadata=None, credentials=None): + """Creates a new namespace and returns its ID. + + Args: + timeout: Optional timeout for the request. + metadata: Optional metadata to send with the request. + credentials: Optional credentials for the request. + + Returns: + int: The ID of the newly created namespace. + + Raises: + Exception: If the request fails. 
+ """ + + new_metadata = self.add_login_metadata(metadata) + request = api.CreateNamespaceRequest() + + try: + response = self.any_client().create_namespace( + request, + timeout=timeout, + metadata=new_metadata, + credentials=credentials, + ) + return response.namespace + except Exception as error: + if util.is_jwt_expired(error): + self.retry_login() + new_metadata = self.add_login_metadata(metadata) + response = self.any_client().create_namespace( + request, + timeout=timeout, + metadata=new_metadata, + credentials=credentials, + ) + return response.namespace + else: + raise error + + def drop_namespace(self, namespace, timeout=None, metadata=None, credentials=None): + """Drops the specified namespace. If the namespace does not exist, the request will still + succeed. + + Args: + namespace (int): The ID of the namespace to drop. + timeout: Optional timeout for the request. + metadata: Optional metadata to send with the request. + credentials: Optional credentials for the request. + + Raises: + Exception: If the request fails. + """ + + new_metadata = self.add_login_metadata(metadata) + request = api.DropNamespaceRequest() + request.namespace = namespace + + try: + response = self.any_client().drop_namespace( + request, + timeout=timeout, + metadata=new_metadata, + credentials=credentials, + ) + return response + except Exception as error: + if util.is_jwt_expired(error): + self.retry_login() + new_metadata = self.add_login_metadata(metadata) + response = self.any_client().drop_namespace( + request, + timeout=timeout, + metadata=new_metadata, + credentials=credentials, + ) + return response + else: + raise error + + def list_namespaces(self, timeout=None, metadata=None, credentials=None): + """Lists all namespaces. + + Args: + timeout: Optional timeout for the request. + metadata: Optional metadata to send with the request. + credentials: Optional credentials for the request. + + Returns: + dict: A dictionary mapping namespace IDs to namespace objects. 
+ + Raises: + Exception: If the request fails. + """ + + new_metadata = self.add_login_metadata(metadata) + request = api.ListNamespacesRequest() + + try: + response = self.any_client().list_namespaces( + request, + timeout=timeout, + metadata=new_metadata, + credentials=credentials, + ) + return dict(response.namespaces) + except Exception as error: + if util.is_jwt_expired(error): + self.retry_login() + new_metadata = self.add_login_metadata(metadata) + response = self.any_client().list_namespaces( + request, + timeout=timeout, + metadata=new_metadata, + credentials=credentials, + ) + return dict(response.namespaces) + else: + raise error + def any_client(self): """Returns a random gRPC client so that requests are distributed evenly among them.""" + return random.choice(self._clients) # nosec # pylint: disable=insecure-random def add_login_metadata(self, metadata): @@ -186,6 +509,7 @@ def open(connection_string: str) -> DgraphClient: Raises: ValueError: If the connection string is invalid. 
""" + try: parsed_url = urllib.parse.urlparse(connection_string) if not parsed_url.scheme == "dgraph": @@ -210,6 +534,7 @@ def open(connection_string: str) -> DgraphClient: credentials = None options = None auth_header = None + namespace = 0 if "sslmode" in params: sslmode = params["sslmode"] @@ -232,6 +557,20 @@ def open(connection_string: str) -> DgraphClient: elif "bearertoken" in params: auth_header = f"Bearer {params['bearertoken']}" + if "namespace" in params: + try: + namespace = int(params["namespace"]) + if namespace < 0: + raise ValueError(f"namespace must be >= 0, got {namespace}") + except ValueError as e: + if "namespace must be >= 0" in str(e): + raise e + raise TypeError( + f"namespace must be an integer, got '{params['namespace']}'" + ) from e + if not username: + raise ValueError("username/password required when namespace is provided") + if auth_header: options = [("grpc.enable_http_proxy", 0)] call_credentials = grpc.metadata_call_credentials( @@ -248,6 +587,8 @@ def open(connection_string: str) -> DgraphClient: if username: thirty_seconds = 30 - client.login(username, password, timeout=thirty_seconds) + client.login_into_namespace( + username, password, namespace, timeout=thirty_seconds + ) return client diff --git a/pydgraph/client_stub.py b/pydgraph/client_stub.py index da432db..6307666 100644 --- a/pydgraph/client_stub.py +++ b/pydgraph/client_stub.py @@ -71,6 +71,35 @@ def check_version(self, check, timeout=None, metadata=None, credentials=None): check, timeout=timeout, metadata=metadata, credentials=credentials ) + def run_dql(self, req, timeout=None, metadata=None, credentials=None): + return self.stub.RunDQL( + req, timeout=timeout, metadata=metadata, credentials=credentials + ) + + def allocate_ids(self, req, timeout=None, metadata=None, credentials=None): + """Allocates IDs (UIDs, timestamps, or namespaces).""" + return self.stub.AllocateIDs( + req, timeout=timeout, metadata=metadata, credentials=credentials + ) + + def 
create_namespace(self, req, timeout=None, metadata=None, credentials=None): + """Creates a new namespace.""" + return self.stub.CreateNamespace( + req, timeout=timeout, metadata=metadata, credentials=credentials + ) + + def drop_namespace(self, req, timeout=None, metadata=None, credentials=None): + """Drops a namespace.""" + return self.stub.DropNamespace( + req, timeout=timeout, metadata=metadata, credentials=credentials + ) + + def list_namespaces(self, req, timeout=None, metadata=None, credentials=None): + """Lists all namespaces.""" + return self.stub.ListNamespaces( + req, timeout=timeout, metadata=metadata, credentials=credentials + ) + def close(self): """Deletes channel and stub.""" try: From 99a998fd59eb3116411259513fda1d1a2f1635fd Mon Sep 17 00:00:00 2001 From: Matthew McNeely Date: Thu, 4 Sep 2025 18:12:41 -0400 Subject: [PATCH 05/10] Add/update tests --- tests/test_acl.py | 33 ++++++---- tests/test_client.py | 6 +- tests/test_connect.py | 92 +++++++++++++++++++++++++++ tests/test_namespace.py | 122 +++++++++++++++++++++++++++++++++++ tests/test_queries.py | 20 +++--- tests/test_zero.py | 136 ++++++++++++++++++++++++++++++++++++++++ 6 files changed, 388 insertions(+), 21 deletions(-) create mode 100644 tests/test_namespace.py create mode 100644 tests/test_zero.py diff --git a/tests/test_acl.py b/tests/test_acl.py index 7c4d7f6..8d03b41 100644 --- a/tests/test_acl.py +++ b/tests/test_acl.py @@ -15,7 +15,7 @@ from . 
import helper -@unittest.skipIf(shutil.which("dgraph") is None, "Dgraph binary not found.") +@unittest.skipIf(shutil.which("docker") is None, "Docker not found.") class TestACL(helper.ClientIntegrationTestCase): user_id = "alice" group_id = "dev" @@ -56,8 +56,7 @@ def test_alter(self): def change_permission(self, permission): bash_command = ( - "dgraph acl -a " - + self.TEST_SERVER_ADDR + "dgraph acl -a alpha1:9080" + " mod -g " + self.group_id + " -p name -m " @@ -74,8 +73,7 @@ def insert_sample_data(self): def add_user(self): bash_command = ( - "dgraph acl -a " - + self.TEST_SERVER_ADDR + "dgraph acl -a alpha1:9080" + " add -u " + self.user_id + " -p " @@ -86,8 +84,7 @@ def add_user(self): def add_group(self): bash_command = ( - "dgraph acl -a " - + self.TEST_SERVER_ADDR + "dgraph acl -a alpha1:9080" + " add -g " + self.group_id + " --guardian-creds user=groot;password=password" @@ -96,8 +93,7 @@ def add_group(self): def add_user_to_group(self): bash_command = ( - "dgraph acl -a " - + self.TEST_SERVER_ADDR + "dgraph acl -a alpha1:9080" + " mod -u " + self.user_id + " -l " @@ -107,14 +103,29 @@ def add_user_to_group(self): self.run_command(bash_command) def run_command(self, bash_command): + # Execute the dgraph command inside the Docker container + docker_command = [ + "docker", + "compose", + "-p", + "pydgraph", + "exec", + "-T", + "alpha1", + ] + bash_command.split() + try: - subprocess.check_output(bash_command.split()) + subprocess.check_output(docker_command, stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: + output_msg = "" + if e.output: + output_msg = "\nOutput: " + e.output.decode() self.fail( "Acl test failed: Unable to execute command " - + bash_command + + " ".join(docker_command) + "\n" + str(e) + + output_msg ) def try_reading(self, expected): diff --git a/tests/test_client.py b/tests/test_client.py index 3659439..7264419 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -1,10 +1,10 @@ -# SPDX-FileCopyrightText: © 
Hypermode Inc. +# SPDX-FileCopyrightText: Hypermode Inc. # SPDX-License-Identifier: Apache-2.0 """Tests construction of Dgraph client.""" __author__ = "Garvit Pahal" -__maintainer__ = "Hypermode Inc. " +__maintainer__ = "Hypermode Inc. " import unittest @@ -22,7 +22,7 @@ def test_constructor(self): def suite(): """Returns a tests suite object.""" suite_obj = unittest.TestSuite() - suite_obj.addTest(TestDgraphClient()) + suite_obj.addTest(unittest.makeSuite(TestDgraphClient)) return suite_obj diff --git a/tests/test_connect.py b/tests/test_connect.py index e604c43..cfa3a48 100644 --- a/tests/test_connect.py +++ b/tests/test_connect.py @@ -1,6 +1,7 @@ import json import os import unittest +from unittest.mock import patch import pytest @@ -8,6 +9,21 @@ class TestOpen(unittest.TestCase): + def _setup_jwt_mock(self, mock_stub_class): + """Helper method to set up JWT mock for open() tests.""" + + from pydgraph.proto import api_pb2 as api + + mock_stub = mock_stub_class.return_value + mock_response = mock_stub.login.return_value + + # Create a proper JWT protobuf and serialize it + jwt = api.Jwt() + jwt.access_jwt = "test_jwt" + jwt.refresh_jwt = "test_refresh" + mock_response.json = jwt.SerializeToString() + return mock_stub + @classmethod def setUpClass(cls): # Get connection details from environment or use defaults @@ -23,6 +39,7 @@ def setUpClass(cls): def test_connection_with_auth(self): """Test connection with username and password.""" + if not self.username or not self.password: self.skipTest("Username and password not configured") @@ -46,22 +63,26 @@ def test_connection_with_auth(self): def test_invalid_scheme(self): """Test that invalid scheme raises ValueError.""" + invalid_url = f"http://{self.dgraph_host}:{self.dgraph_port}" with pytest.raises(ValueError, match="scheme must be 'dgraph'"): open(invalid_url) def test_missing_hostname(self): """Test that missing hostname raises ValueError.""" + with pytest.raises(ValueError, match="hostname required"): 
open(f"dgraph://:{self.dgraph_port}") def test_missing_port(self): """Test that missing port raises ValueError.""" + with pytest.raises(ValueError, match="port required"): open(f"dgraph://{self.dgraph_host}") def test_username_without_password(self): """Test that username without password raises ValueError.""" + with pytest.raises( ValueError, match="password required when username is provided" ): @@ -69,14 +90,85 @@ def test_username_without_password(self): def test_invalid_sslmode(self): """Test that invalid sslmode raises ValueError.""" + with pytest.raises(ValueError, match="Invalid sslmode"): open(f"dgraph://{self.dgraph_host}:{self.dgraph_port}?sslmode=invalid") def test_unsupported_require_sslmode(self): """Test that sslmode=require raises appropriate error.""" + with pytest.raises(ValueError, match="sslmode=require is not supported"): open(f"dgraph://{self.dgraph_host}:{self.dgraph_port}?sslmode=require") + @patch("pydgraph.client_stub.DgraphClientStub") + def test_open_with_valid_integer_namespace(self, mock_stub_class): + """Test that open() accepts valid integer namespace with credentials.""" + + self._setup_jwt_mock(mock_stub_class) + + # Should not raise an exception with valid integer and credentials + try: + open("dgraph://user:pass@localhost:9080?namespace=123") + except (TypeError, ValueError): + self.fail("open() raised exception with valid integer namespace") + + def test_open_with_string_namespace_raises_error(self): + """Test that open() raises TypeError with non-numeric namespace.""" + + with pytest.raises(TypeError, match="namespace must be an integer"): + open("dgraph://localhost:9080?namespace=abc") + + def test_open_with_float_namespace_raises_error(self): + """Test that open() raises TypeError with float namespace.""" + + with pytest.raises(TypeError, match="namespace must be an integer"): + open("dgraph://localhost:9080?namespace=123.5") + + @patch("pydgraph.client_stub.DgraphClientStub") + def test_open_with_zero_namespace(self, 
mock_stub_class): + """Test that open() accepts zero as valid namespace with credentials.""" + + self._setup_jwt_mock(mock_stub_class) + + # Should not raise an exception with zero and credentials + try: + open("dgraph://user:pass@localhost:9080?namespace=0") + except (TypeError, ValueError) as e: + self.fail( + f"open() raised exception with zero namespace: {type(e).__name__}: {e}" + ) + + def test_open_with_negative_namespace_raises_error(self): + """Test that open() raises ValueError with negative namespace.""" + + with pytest.raises(ValueError, match="namespace must be >= 0"): + open("dgraph://localhost:9080?namespace=-1") + + def test_namespace_without_username_raises_error(self): + """Test that namespace without username/password raises ValueError.""" + + with pytest.raises( + ValueError, match="username/password required when namespace is provided" + ): + open("dgraph://localhost:9080?namespace=123") + + @patch("pydgraph.client_stub.DgraphClientStub") + def test_namespace_with_username_password_succeeds(self, mock_stub_class): + """Test that namespace with username/password is accepted.""" + + self._setup_jwt_mock(mock_stub_class) + + # Should not raise an exception with username/password + try: + open("dgraph://user:pass@localhost:9080?namespace=123") + except ValueError as e: + if "username/password required" in str(e): + self.fail( + "open() raised ValueError even with username/password provided" + ) + # Re-raise other ValueErrors (like connection errors) + raise + if __name__ == "__main__": unittest.main() diff --git a/tests/test_namespace.py b/tests/test_namespace.py new file mode 100644 index 0000000..e6994c6 --- /dev/null +++ b/tests/test_namespace.py @@ -0,0 +1,122 @@ +# SPDX-FileCopyrightText: © Hypermode Inc. +# SPDX-License-Identifier: Apache-2.0 + +__author__ = "Hypermode Inc. " +__maintainer__ = "Hypermode Inc. " + +import logging +import unittest + +from . 
import helper + + +class TestNamespaces(helper.ClientIntegrationTestCase): + """Tests for the namespace management methods.""" + + def setUp(self): + super(TestNamespaces, self).setUp() + helper.drop_all(self.client) + + def test_create_namespace(self): + """Test creating a new namespace returns valid namespace ID.""" + namespace_id = self.client.create_namespace() + + # Verify we get a valid namespace ID + self.assertIsInstance(namespace_id, int) + self.assertGreater(namespace_id, 0) + + # Test creating another namespace gives us a different ID + namespace_id2 = self.client.create_namespace() + self.assertIsInstance(namespace_id2, int) + self.assertGreater(namespace_id2, 0) + self.assertNotEqual(namespace_id, namespace_id2) + + def test_list_namespaces(self): + """Test listing namespaces returns a dictionary.""" + # Create a namespace first + namespace_id = self.client.create_namespace() + + # List namespaces + namespaces = self.client.list_namespaces() + + # Verify we get a dictionary + self.assertIsInstance(namespaces, dict) + + # The created namespace should be in the list + self.assertIn(namespace_id, namespaces) + + # Each namespace should have the expected structure + namespace_obj = namespaces[namespace_id] + self.assertTrue(hasattr(namespace_obj, "id")) + self.assertEqual(namespace_obj.id, namespace_id) + + def test_drop_namespace(self): + """Test dropping a namespace removes it from the list.""" + # Create a namespace + namespace_id = self.client.create_namespace() + + # Verify it exists in the list + namespaces_before = self.client.list_namespaces() + self.assertIn(namespace_id, namespaces_before) + + # Only drop if it's not namespace 0 (system namespace cannot be deleted) + if namespace_id != 0: + # Drop the namespace + self.client.drop_namespace(namespace_id) + + # Verify it's no longer in the list + namespaces_after = self.client.list_namespaces() + self.assertNotIn(namespace_id, namespaces_after) + else: + # If we got namespace 0, verify we can't 
drop it + with self.assertRaises(Exception) as cm: + self.client.drop_namespace(namespace_id) + self.assertIn("cannot be deleted", str(cm.exception)) + + def test_create_and_drop_multiple_namespaces(self): + """Test creating and dropping multiple namespaces.""" + # Create multiple namespaces + namespace_ids = [] + for _ in range(3): + namespace_id = self.client.create_namespace() + namespace_ids.append(namespace_id) + + # Verify all are in the list + namespaces = self.client.list_namespaces() + for namespace_id in namespace_ids: + self.assertIn(namespace_id, namespaces) + + # Drop all namespaces (except namespace 0 which cannot be deleted) + droppable_ids = [ns_id for ns_id in namespace_ids if ns_id != 0] + for namespace_id in droppable_ids: + self.client.drop_namespace(namespace_id) + + # Verify droppable namespaces are no longer in the list + namespaces_after = self.client.list_namespaces() + for namespace_id in droppable_ids: + self.assertNotIn(namespace_id, namespaces_after) + + # If namespace 0 was created, it should still be in the list + if 0 in namespace_ids: + self.assertIn(0, namespaces_after) + + def test_cannot_drop_namespace_zero(self): + """Test that namespace 0 cannot be dropped.""" + # Namespace 0 is the system namespace and cannot be deleted + import grpc + + with self.assertRaises(grpc.RpcError) as cm: + self.client.drop_namespace(0) + self.assertIn("cannot be deleted", str(cm.exception)) + + +def suite(): + suite_obj = unittest.TestSuite() + suite_obj.addTest(TestNamespaces()) + return suite_obj + + +if __name__ == "__main__": + logging.basicConfig(level=logging.INFO) + runner = unittest.TextTestRunner() + runner.run(suite()) diff --git a/tests/test_queries.py b/tests/test_queries.py index d8684c9..bc986af 100755 --- a/tests/test_queries.py +++ b/tests/test_queries.py @@ -8,7 +8,6 @@ import json import logging -import sys import unittest import pydgraph @@ -39,7 +38,6 @@ def test_check_version(self): def test_mutation_and_query(self): """Runs 
mutation and verifies queries see the results.""" - txn = self.client.txn() _ = txn.mutate( pydgraph.Mutation(commit_now=True), @@ -94,17 +92,25 @@ def test_mutation_and_query(self): ) self.assertEqual(expected_rdf, response.rdf.decode("utf-8")) + """ Call run_dql and verify the result """ + response = self.client.run_dql( + dql_query=query, + vars={"$a": "Alice"}, + resp_format="JSON", + read_only=True, + ) + self.assertEqual( + [{"name": "Alice", "follows": [{"name": "Greg"}]}], + json.loads(response.json).get("me"), + ) + def is_number(number): - """Returns true if object is a number. Compatible with Python 2 and 3.""" - if sys.version_info[0] < 3: - return isinstance(number, (int, long)) - + """Returns true if object is a number""" return isinstance(number, int) def suite(): - """Returns a test suite object.""" suite_obj = unittest.TestSuite() suite_obj.addTest(TestQueries()) return suite_obj diff --git a/tests/test_zero.py b/tests/test_zero.py new file mode 100644 index 0000000..96462b2 --- /dev/null +++ b/tests/test_zero.py @@ -0,0 +1,136 @@ +# SPDX-FileCopyrightText: © Hypermode Inc. +# SPDX-License-Identifier: Apache-2.0 + +"""Tests for the allocation methods (allocate_uids, allocate_timestamps, allocate_namespaces).""" + +__author__ = "Hypermode Inc. " +__maintainer__ = "Hypermode Inc. " + +import logging +import unittest + +from . 
import helper + + +class TestAllocations(helper.ClientIntegrationTestCase): + """Tests for the allocation methods.""" + + def setUp(self): + super(TestAllocations, self).setUp() + helper.drop_all(self.client) + + def test_allocate_uids(self): + """Test allocating UIDs returns valid range.""" + how_many = 100 + start, end = self.client.allocate_uids(how_many) + + # Verify we get a valid range + self.assertIsInstance(start, int) + self.assertIsInstance(end, int) + self.assertGreater(start, 0) + self.assertGreater(end, start) + self.assertEqual(end - start, how_many) + + # Test allocating again gives us a different range + start2, end2 = self.client.allocate_uids(how_many) + self.assertNotEqual(start, start2) + self.assertGreaterEqual(start2, end) # Should be non-overlapping + + def test_allocate_timestamps(self): + """Test allocating timestamps returns valid range.""" + how_many = 50 + start, end = self.client.allocate_timestamps(how_many) + + # Verify we get a valid range + self.assertIsInstance(start, int) + self.assertIsInstance(end, int) + self.assertGreater(start, 0) + self.assertGreater(end, start) + self.assertEqual(end - start, how_many) + + # Test allocating again gives us a different range + start2, end2 = self.client.allocate_timestamps(how_many) + self.assertNotEqual(start, start2) + self.assertGreaterEqual(start2, end) # Should be non-overlapping + + def test_allocate_namespaces(self): + """Test allocating namespaces returns valid range.""" + how_many = 10 + start, end = self.client.allocate_namespaces(how_many) + + # Verify we get a valid range + self.assertIsInstance(start, int) + self.assertIsInstance(end, int) + self.assertGreater(start, 0) + self.assertGreater(end, start) + self.assertEqual(end - start, how_many) + + # Test allocating again gives us a different range + start2, end2 = self.client.allocate_namespaces(how_many) + self.assertNotEqual(start, start2) + self.assertGreaterEqual(start2, end) # Should be non-overlapping + + def 
test_allocate_uids_different_sizes(self): + """Test allocating different numbers of UIDs.""" + # Test small allocation + start1, end1 = self.client.allocate_uids(1) + self.assertEqual(end1 - start1, 1) + + # Test larger allocation + start2, end2 = self.client.allocate_uids(1000) + self.assertEqual(end2 - start2, 1000) + + # Ensure ranges don't overlap + self.assertGreaterEqual(start2, end1) + + def test_allocate_zero_items(self): + """Test allocating zero items raises ValueError.""" + with self.assertRaises(ValueError) as cm: + self.client.allocate_uids(0) + self.assertEqual(str(cm.exception), "how_many must be greater than 0") + + # Test negative values also raise ValueError + with self.assertRaises(ValueError) as cm: + self.client.allocate_timestamps(-1) + self.assertEqual(str(cm.exception), "how_many must be greater than 0") + + def test_allocation_methods_are_independent(self): + """Test that different allocation types don't interfere with each other.""" + # Allocate from each type + uid_start, uid_end = self.client.allocate_uids(100) + ts_start, ts_end = self.client.allocate_timestamps(100) + ns_start, ns_end = self.client.allocate_namespaces(100) + + # All should return valid ranges + self.assertEqual(uid_end - uid_start, 100) + self.assertEqual(ts_end - ts_start, 100) + self.assertEqual(ns_end - ns_start, 100) + + # The ranges can be different (they're different types of IDs) + # We just verify they're all positive and valid + self.assertGreater(uid_start, 0) + self.assertGreater(ts_start, 0) + self.assertGreater(ns_start, 0) + + def test_allocate_with_timeout(self): + """Test allocation methods work with timeout parameter.""" + start, end = self.client.allocate_uids(10, timeout=30) + self.assertEqual(end - start, 10) + + start, end = self.client.allocate_timestamps(10, timeout=30) + self.assertEqual(end - start, 10) + + start, end = self.client.allocate_namespaces(10, timeout=30) + self.assertEqual(end - start, 10) + + +def suite(): + suite_obj = 
unittest.TestSuite() + suite_obj.addTest(TestAllocations()) + return suite_obj + + +if __name__ == "__main__": + logging.basicConfig(level=logging.INFO) + runner = unittest.TextTestRunner() + runner.run(suite()) From 2eb24cd720b4d42254d94b84471f82c322758556 Mon Sep 17 00:00:00 2001 From: Matthew McNeely Date: Fri, 5 Sep 2025 14:07:33 -0400 Subject: [PATCH 06/10] Skip pre-v25 functionality in tests --- tests/helper.py | 52 +++++++++++++++++++++++++++++++++++++++++ tests/test_namespace.py | 1 + tests/test_queries.py | 41 ++++++++++++++++++++------------ tests/test_zero.py | 1 + 4 files changed, 80 insertions(+), 15 deletions(-) diff --git a/tests/helper.py b/tests/helper.py index 3b4ef34..5c1505e 100644 --- a/tests/helper.py +++ b/tests/helper.py @@ -7,9 +7,12 @@ __maintainer__ = "Hypermode Inc. " import os +import re import time import unittest +from packaging import version + import pydgraph SERVER_ADDR = "localhost:9180" @@ -37,6 +40,55 @@ def setup(): return client +def check_dgraph_version(client, min_version): + """Check if Dgraph version meets minimum requirement. 
+ + Args: + client: Dgraph client instance + min_version: Minimum required version string (e.g., "25.0.0") + + Returns: + tuple: (is_compatible, actual_version, error_message) + is_compatible: bool indicating if version requirement is met + actual_version: string of actual Dgraph version or None + error_message: string error message or None + """ + try: + dgraph_version = client.check_version() + version_str = dgraph_version.lstrip("v") + # Extract just the semantic version part (major.minor.patch) + match = re.match(r"^(\d+\.\d+\.\d+)", version_str) + if match: + clean_version = match.group(1) + else: + # Fallback: try to parse as-is in case it's already clean + clean_version = version_str + + is_compatible = version.parse(clean_version) >= version.parse(min_version) + return is_compatible, dgraph_version, None + except Exception as e: + return False, None, str(e) + + +def skip_if_dgraph_version_below(client, min_version, test_case): + """Skip test if Dgraph version is below minimum requirement. + + Args: + client: Dgraph client instance + min_version: Minimum required version string (e.g., "25.0.0") + test_case: Test case instance to call skipTest on + """ + is_compatible, actual_version, error_msg = check_dgraph_version(client, min_version) + + if not is_compatible: + if error_msg: + test_case.skipTest(f"Could not determine Dgraph version: {error_msg}") + else: + test_case.skipTest( + f"Test requires Dgraph v{min_version}+, found {actual_version}" + ) + + class ClientIntegrationTestCase(unittest.TestCase): """Base class for other integration test cases. Provides a client object with a connection to the dgraph server. 
diff --git a/tests/test_namespace.py b/tests/test_namespace.py index e6994c6..1feed2f 100644 --- a/tests/test_namespace.py +++ b/tests/test_namespace.py @@ -15,6 +15,7 @@ class TestNamespaces(helper.ClientIntegrationTestCase): def setUp(self): super(TestNamespaces, self).setUp() + helper.skip_if_dgraph_version_below(self.client, "25.0.0", self) helper.drop_all(self.client) def test_create_namespace(self): diff --git a/tests/test_queries.py b/tests/test_queries.py index bc986af..2ed5ad4 100755 --- a/tests/test_queries.py +++ b/tests/test_queries.py @@ -20,9 +20,18 @@ class TestQueries(helper.ClientIntegrationTestCase): def setUp(self): super(TestQueries, self).setUp() - helper.drop_all(self.client) helper.set_schema(self.client, "name: string @index(term) .") + self.query = """query me($a: string) { + me(func: anyofterms(name, "Alice")) + { + name + follows + { + name + } + } + }""" def test_check_version(self): """Verifies the check_version method correctly returns the cluster version""" @@ -48,17 +57,6 @@ def test_mutation_and_query(self): """, ) - query = """query me($a: string) { - me(func: anyofterms(name, "Alice")) - { - name - follows - { - name - } - } - }""" - queryRDF = """query q($a: string) { q(func: anyofterms(name, "Alice")) { @@ -67,7 +65,7 @@ def test_mutation_and_query(self): } }""" - response = self.client.txn().query(query, variables={"$a": "Alice"}) + response = self.client.txn().query(self.query, variables={"$a": "Alice"}) self.assertEqual( [{"name": "Alice", "follows": [{"name": "Greg"}]}], json.loads(response.json).get("me"), @@ -92,9 +90,22 @@ def test_mutation_and_query(self): ) self.assertEqual(expected_rdf, response.rdf.decode("utf-8")) - """ Call run_dql and verify the result """ + def test_run_dql(self): + """Call run_dql (a version 25+ feature) and verify the result""" + helper.skip_if_dgraph_version_below(self.client, "25.0.0", self) + _ = self.client.run_dql( + dql_query=""" + { + set { + _:alice "Alice" . + _:greg "Greg" . 
+ _:alice _:greg . + } + } + """ + ) response = self.client.run_dql( - dql_query=query, + dql_query=self.query, vars={"$a": "Alice"}, resp_format="JSON", read_only=True, diff --git a/tests/test_zero.py b/tests/test_zero.py index 96462b2..a404f97 100644 --- a/tests/test_zero.py +++ b/tests/test_zero.py @@ -17,6 +17,7 @@ class TestAllocations(helper.ClientIntegrationTestCase): def setUp(self): super(TestAllocations, self).setUp() + helper.skip_if_dgraph_version_below(self.client, "25.0.0", self) helper.drop_all(self.client) def test_allocate_uids(self): From df00fea1ca9593404c91616cd52dc9723ee47ce9 Mon Sep 17 00:00:00 2001 From: Matthew McNeely Date: Wed, 10 Sep 2025 15:38:40 -0400 Subject: [PATCH 07/10] Update trunk --- .trunk/trunk.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.trunk/trunk.yaml b/.trunk/trunk.yaml index de8b3a3..2c751b5 100644 --- a/.trunk/trunk.yaml +++ b/.trunk/trunk.yaml @@ -25,20 +25,20 @@ lint: enabled: - trivy@0.66.0 - trivy@0.58.2 - - renovate@41.96.1 + - renovate@41.99.6 - actionlint@1.7.7 - bandit@1.8.6 - black@25.1.0 - - checkov@3.2.469 + - checkov@3.2.470 - git-diff-check - isort@6.0.1 - markdownlint@0.45.0 - prettier@3.6.2 - - ruff@0.12.11 + - ruff@0.12.12 - shellcheck@0.11.0 - shfmt@3.6.0 - taplo@0.10.0 - - trufflehog@3.90.5 + - trufflehog@3.90.6 - yamllint@1.37.1 actions: enabled: From ae9ba4e00e4af58b1b8e4a8396e88262c62a8bea Mon Sep 17 00:00:00 2001 From: Matthew McNeely Date: Wed, 10 Sep 2025 15:38:54 -0400 Subject: [PATCH 08/10] Remove logging --- pydgraph/client.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/pydgraph/client.py b/pydgraph/client.py index 051a237..1d885d3 100755 --- a/pydgraph/client.py +++ b/pydgraph/client.py @@ -46,9 +46,6 @@ def check_version(self, timeout=None, metadata=None, credentials=None): metadata=new_metadata, credentials=credentials, ) - import logging - - logging.warning(f"Check version response: {response.tag}") return response.tag except Exception as error: if 
util.is_jwt_expired(error): From 0120bf19eb2e29d3f7437c96cf3d098b41d98017 Mon Sep 17 00:00:00 2001 From: Matthew McNeely Date: Wed, 10 Sep 2025 15:39:06 -0400 Subject: [PATCH 09/10] Sync the version --- pydgraph/meta.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pydgraph/meta.py b/pydgraph/meta.py index 5336ef2..2ce27c4 100644 --- a/pydgraph/meta.py +++ b/pydgraph/meta.py @@ -3,4 +3,4 @@ """Metadata about this package.""" -VERSION = "24.3.0" +VERSION = "25.0.0" From 701daeae26f88fb2fb2759a91df627313c244701 Mon Sep 17 00:00:00 2001 From: Matthew McNeely Date: Wed, 10 Sep 2025 15:39:37 -0400 Subject: [PATCH 10/10] Update the changelog and readme --- CHANGELOG.md | 16 ++++++++++++++++ README.md | 2 ++ 2 files changed, 18 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1b8f50e..e2b8a55 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,22 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). 
+## Unreleased + +**Added** + +- Add v25 Dgraph API + - Added new `run_dql()` method for executing DQL queries directly + - Added `allocate_uids()` method for allocating unique identifiers + - Added `allocate_timestamps()` method for allocating timestamps + - Added `allocate_namespaces()` method for allocating namespace IDs + - Added namespace management methods: + - `create_namespace()` - Creates a new namespace and returns its ID + - `drop_namespace()` - Drops the specified namespace + - `list_namespaces()` - Lists all available namespaces + - Updated proto definitions to support Dgraph v25 API + - Enhanced client functionality with new gRPC service methods + ## [v24.3.0] - 2025-07-29 **Chore** diff --git a/README.md b/README.md index 708025f..2cbe53a 100644 --- a/README.md +++ b/README.md @@ -101,6 +101,7 @@ Valid connection string args: | apikey | \ | a Dgraph Cloud API Key | | bearertoken | \ | an access token | | sslmode | disable \| require \| verify-ca | TLS option, the default is `disable`. If `verify-ca` is set, the TLS certificate configured in the Dgraph cluster must be from a valid certificate authority. | +| namespace | \ | a previously created integer-based namespace; username and password must be supplied | Note the `sslmode=require` pair is not supported and will throw an Exception if used. Python grpc does not support traffic over TLS that does not fully verify the certificate and domain. 
Developers @@ -115,6 +116,7 @@ Some example connection strings: | dgraph://sally:supersecret@dg.example.com:443?sslmode=verify-ca | Connect to remote server, use ACL and require TLS and a valid certificate from a CA | | dgraph://foo-bar.grpc.us-west-2.aws.cloud.dgraph.io:443?sslmode=verify-ca&apikey=\ | Connect to a Dgraph Cloud cluster | | dgraph://foo-bar.grpc.hypermode.com:443?sslmode=verify-ca&bearertoken=\ | Connect to a Dgraph cluster protected by a secure gateway | +| dgraph://sally:supersecret@dg.example.com:443?namespace=2 | Connect to an ACL-enabled Dgraph cluster in namespace 2 | Using the `Open` function with a connection string: