From ccbae66e5d81289f1423b7605a8d0d255d699f02 Mon Sep 17 00:00:00 2001 From: Lucas Vieira Date: Mon, 23 Mar 2026 22:50:17 -0300 Subject: [PATCH 1/8] fix: correct package name references to fila-client --- README.md | 8 ++++---- package-lock.json | 5 +++-- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index f3ba151..f4d952e 100644 --- a/README.md +++ b/README.md @@ -5,13 +5,13 @@ JavaScript/TypeScript client SDK for the [Fila](https://github.com/faisca/fila) ## Installation ```bash -npm install @fila/client +npm install fila-client ``` ## Usage ```typescript -import { Client } from "@fila/client"; +import { Client } from "fila-client"; const client = new Client("localhost:5555"); @@ -54,7 +54,7 @@ For self-signed or private CA certificates, pass the CA cert explicitly: ```typescript import * as fs from "fs"; -import { Client } from "@fila/client"; +import { Client } from "fila-client"; const client = new Client("localhost:5555", { caCert: fs.readFileSync("ca.pem"), @@ -149,7 +149,7 @@ Close the underlying gRPC channel. 
Per-operation error classes are thrown for specific failure modes: ```typescript -import { QueueNotFoundError, MessageNotFoundError } from "@fila/client"; +import { QueueNotFoundError, MessageNotFoundError } from "fila-client"; try { await client.enqueue("missing-queue", null, Buffer.from("test")); diff --git a/package-lock.json b/package-lock.json index d5d4671..842f9bb 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,11 +1,11 @@ { - "name": "@fila/client", + "name": "fila-client", "version": "0.1.0", "lockfileVersion": 3, "requires": true, "packages": { "": { - "name": "@fila/client", + "name": "fila-client", "version": "0.1.0", "license": "AGPL-3.0-or-later", "dependencies": { @@ -2997,6 +2997,7 @@ "integrity": "sha512-w+N7Hifpc3gRjZ63vYBXA56dvvRlNWRczTdmCBBa+CotUzAPf5b7YMdMR/8CQoeYE5LX3W4wj6RYTgonm1b9DA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "esbuild": "^0.27.0", "fdir": "^6.5.0", From 2ad3034fad674e3ac467b36ef0c6ae0101e187b1 Mon Sep 17 00:00:00 2001 From: Lucas Vieira Date: Sun, 22 Mar 2026 13:01:52 -0300 Subject: [PATCH 2/8] feat: transparent leader hint reconnect on consume --- src/client.ts | 112 ++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 96 insertions(+), 16 deletions(-) diff --git a/src/client.ts b/src/client.ts index 286c6fa..1ddd0da 100644 --- a/src/client.ts +++ b/src/client.ts @@ -42,6 +42,34 @@ function loadServiceProto(): grpc.GrpcObject { return grpc.loadPackageDefinition(packageDefinition); } +/** Metadata key the server uses to indicate the current queue leader address. */ +const LEADER_ADDR_KEY = "x-fila-leader-addr"; + +/** + * Extract the leader address from a gRPC UNAVAILABLE error's trailing metadata. + * Returns the address string, or undefined if not present. 
+ */ +function extractLeaderAddr(err: grpc.ServiceError): string | undefined { + if (err.code !== grpc.status.UNAVAILABLE) return undefined; + const values = err.metadata?.get(LEADER_ADDR_KEY); + if (values && values.length > 0) { + return String(values[0]); + } + return undefined; +} + +/** Create a FilaServiceClient for the given address and credentials. */ +function createGrpcClient( + addr: string, + creds: grpc.ChannelCredentials +): FilaServiceClient { + const proto = loadServiceProto(); + // eslint-disable-next-line @typescript-eslint/no-explicit-any + const FilaService = (proto.fila as any).v1 + .FilaService as grpc.ServiceClientConstructor; + return new FilaService(addr, creds) as unknown as FilaServiceClient; +} + function mapEnqueueError(err: grpc.ServiceError): FilaError { if (err.code === grpc.status.NOT_FOUND) { return new QueueNotFoundError(`enqueue: ${err.details}`); @@ -105,6 +133,7 @@ export interface ClientOptions { */ export class Client { private readonly grpcClient: FilaServiceClient; + private readonly creds: grpc.ChannelCredentials; private readonly apiKey?: string; /** @@ -113,11 +142,6 @@ export class Client { * @param options - Optional TLS and authentication settings. */ constructor(addr: string, options?: ClientOptions) { - const proto = loadServiceProto(); - // eslint-disable-next-line @typescript-eslint/no-explicit-any - const FilaService = (proto.fila as any).v1 - .FilaService as grpc.ServiceClientConstructor; - const hasClientCert = !!options?.clientCert; const hasClientKey = !!options?.clientKey; const tlsEnabled = !!options?.tls || !!options?.caCert; @@ -129,27 +153,23 @@ export class Client { throw new Error("clientCert and clientKey must be provided together"); } - let creds: grpc.ChannelCredentials; if (options?.caCert) { - creds = grpc.credentials.createSsl( + this.creds = grpc.credentials.createSsl( options.caCert, options.clientKey ?? null, options.clientCert ?? 
null ); } else if (tlsEnabled) { - creds = grpc.credentials.createSsl( + this.creds = grpc.credentials.createSsl( null, options?.clientKey ?? null, options?.clientCert ?? null ); } else { - creds = grpc.credentials.createInsecure(); + this.creds = grpc.credentials.createInsecure(); } - this.grpcClient = new FilaService( - addr, - creds - ) as unknown as FilaServiceClient; + this.grpcClient = createGrpcClient(addr, this.creds); this.apiKey = options?.apiKey; } @@ -202,15 +222,27 @@ export class Client { * Returns an async iterable that yields messages as they become available. * Nil message frames (keepalive signals) are skipped automatically. * + * If the server returns UNAVAILABLE with an `x-fila-leader-addr` metadata + * header, the client transparently reconnects to the leader node and retries + * the consume stream once (max 1 redirect per call). + * * @param queue - Queue to consume from. * @throws {QueueNotFoundError} If the queue does not exist. * @throws {RPCError} For unexpected gRPC failures. */ async *consume(queue: string): AsyncIterable { - const stream = this.grpcClient.consume({ queue }, this.callMetadata()); + yield* this.consumeInner(queue, false); + } - // Wrap the Node.js readable stream into an async iterable. - // grpc-js streams are already async-iterable in modern versions. + /** + * Inner consume implementation that optionally follows a leader hint. + * @param redirected - true if this is already a redirected attempt (prevents loops). + */ + private async *consumeInner( + queue: string, + redirected: boolean + ): AsyncIterable { + const stream = this.grpcClient.consume({ queue }, this.callMetadata()); const iterable = stream as AsyncIterable; try { @@ -233,6 +265,54 @@ export class Client { } } catch (err) { const svcErr = err as grpc.ServiceError; + + // If we haven't redirected yet and the server tells us who the leader is, + // open a new connection to the leader and retry the consume stream. 
+ if (!redirected) { + const leaderAddr = extractLeaderAddr(svcErr); + if (leaderAddr) { + stream.cancel(); + const leaderClient = createGrpcClient(leaderAddr, this.creds); + const leaderStream = leaderClient.consume( + { queue }, + this.callMetadata() + ); + const leaderIterable = + leaderStream as AsyncIterable; + try { + for await (const resp of leaderIterable) { + const msg = resp.message; + if (!msg || !msg.id) { + continue; + } + const metadata = msg.metadata; + yield { + id: msg.id, + headers: msg.headers ?? {}, + payload: Buffer.isBuffer(msg.payload) + ? msg.payload + : Buffer.from(msg.payload ?? ""), + fairnessKey: metadata?.fairnessKey ?? "", + attemptCount: metadata?.attemptCount ?? 0, + queue: metadata?.queueId ?? "", + }; + } + } catch (retryErr) { + const retrySvcErr = retryErr as grpc.ServiceError; + if ( + retrySvcErr.code !== undefined && + retrySvcErr.code !== grpc.status.CANCELLED + ) { + throw mapConsumeError(retrySvcErr); + } + } finally { + leaderStream.cancel(); + (leaderClient as unknown as grpc.Client).close(); + } + return; + } + } + if (svcErr.code !== undefined && svcErr.code !== grpc.status.CANCELLED) { throw mapConsumeError(svcErr); } From c5bd006868ad713eb713c236e8f6c36216b2cea9 Mon Sep 17 00:00:00 2001 From: Lucas Vieira Date: Mon, 23 Mar 2026 22:54:33 -0300 Subject: [PATCH 3/8] refactor: extract consume message mapping helper --- src/client.ts | 55 +++++++++++++++++++++++---------------------------- 1 file changed, 25 insertions(+), 30 deletions(-) diff --git a/src/client.ts b/src/client.ts index 1ddd0da..0e6b51d 100644 --- a/src/client.ts +++ b/src/client.ts @@ -173,6 +173,27 @@ export class Client { this.apiKey = options?.apiKey; } + /** Map a ConsumeResponse to a ConsumeMessage, or undefined for keepalive frames. 
*/ + private static mapConsumeResponse( + resp: ConsumeResponse__Output + ): ConsumeMessage | undefined { + const msg = resp.message; + if (!msg || !msg.id) { + return undefined; // keepalive frame + } + const metadata = msg.metadata; + return { + id: msg.id, + headers: msg.headers ?? {}, + payload: Buffer.isBuffer(msg.payload) + ? msg.payload + : Buffer.from(msg.payload ?? ""), + fairnessKey: metadata?.fairnessKey ?? "", + attemptCount: metadata?.attemptCount ?? 0, + queue: metadata?.queueId ?? "", + }; + } + /** Build gRPC metadata, attaching the API key if configured. */ private callMetadata(): grpc.Metadata { const md = new grpc.Metadata(); @@ -247,21 +268,8 @@ export class Client { try { for await (const resp of iterable) { - const msg = resp.message; - if (!msg || !msg.id) { - continue; // keepalive frame - } - const metadata = msg.metadata; - yield { - id: msg.id, - headers: msg.headers ?? {}, - payload: Buffer.isBuffer(msg.payload) - ? msg.payload - : Buffer.from(msg.payload ?? ""), - fairnessKey: metadata?.fairnessKey ?? "", - attemptCount: metadata?.attemptCount ?? 0, - queue: metadata?.queueId ?? "", - }; + const mapped = Client.mapConsumeResponse(resp); + if (mapped) yield mapped; } } catch (err) { const svcErr = err as grpc.ServiceError; @@ -281,21 +289,8 @@ export class Client { leaderStream as AsyncIterable; try { for await (const resp of leaderIterable) { - const msg = resp.message; - if (!msg || !msg.id) { - continue; - } - const metadata = msg.metadata; - yield { - id: msg.id, - headers: msg.headers ?? {}, - payload: Buffer.isBuffer(msg.payload) - ? msg.payload - : Buffer.from(msg.payload ?? ""), - fairnessKey: metadata?.fairnessKey ?? "", - attemptCount: metadata?.attemptCount ?? 0, - queue: metadata?.queueId ?? 
"", - }; + const mapped = Client.mapConsumeResponse(resp); + if (mapped) yield mapped; } } catch (retryErr) { const retrySvcErr = retryErr as grpc.ServiceError; From 3ec5fcf9a7c8a45c07438c7f206ce5eef30b5e4a Mon Sep 17 00:00:00 2001 From: Lucas Vieira Date: Mon, 23 Mar 2026 23:26:40 -0300 Subject: [PATCH 4/8] chore: bump version to 0.2.0 --- package-lock.json | 4 ++-- package.json | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/package-lock.json b/package-lock.json index 842f9bb..0b953af 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "fila-client", - "version": "0.1.0", + "version": "0.2.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "fila-client", - "version": "0.1.0", + "version": "0.2.0", "license": "AGPL-3.0-or-later", "dependencies": { "@grpc/grpc-js": "^1.12.0", diff --git a/package.json b/package.json index 852511a..abf30ec 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "fila-client", - "version": "0.1.0", + "version": "0.2.0", "description": "JavaScript/TypeScript client SDK for the Fila message broker", "repository": { "type": "git", From 0300733d7e7176072c3de41c72425257985df2ec Mon Sep 17 00:00:00 2001 From: Lucas Vieira Date: Tue, 24 Mar 2026 10:20:36 -0300 Subject: [PATCH 5/8] feat: add batch enqueue, smart batching, and delivery batching (#3) add batchEnqueue() for explicit batch operations with per-message results. enqueue() now routes through an auto-batcher by default (setImmediate-based opportunistic batching). three batch modes: auto (default), linger (timer-based), and disabled. single-item optimization uses Enqueue RPC to preserve error types. consume stream transparently unpacks batched ConsumeResponse.messages field. close() drains pending messages before disconnecting. 
--- generated/fila/v1/BatchEnqueueRequest.ts | 11 + generated/fila/v1/BatchEnqueueResponse.ts | 11 + generated/fila/v1/BatchEnqueueResult.ts | 15 ++ generated/fila/v1/ConsumeResponse.ts | 2 + generated/fila/v1/FilaService.ts | 14 + generated/service.ts | 3 + proto/fila/v1/service.proto | 19 +- src/batcher.ts | 266 +++++++++++++++++++ src/client.ts | 220 +++++++++++++--- src/index.ts | 2 +- src/types.ts | 15 ++ test/auth.test.ts | 16 +- test/batch.test.ts | 304 ++++++++++++++++++++++ test/batcher.unit.test.ts | 54 ++++ test/client.test.ts | 6 +- 15 files changed, 916 insertions(+), 42 deletions(-) create mode 100644 generated/fila/v1/BatchEnqueueRequest.ts create mode 100644 generated/fila/v1/BatchEnqueueResponse.ts create mode 100644 generated/fila/v1/BatchEnqueueResult.ts create mode 100644 src/batcher.ts create mode 100644 test/batch.test.ts create mode 100644 test/batcher.unit.test.ts diff --git a/generated/fila/v1/BatchEnqueueRequest.ts b/generated/fila/v1/BatchEnqueueRequest.ts new file mode 100644 index 0000000..afd9d70 --- /dev/null +++ b/generated/fila/v1/BatchEnqueueRequest.ts @@ -0,0 +1,11 @@ +// Original file: proto/fila/v1/service.proto + +import type { EnqueueRequest as _fila_v1_EnqueueRequest, EnqueueRequest__Output as _fila_v1_EnqueueRequest__Output } from '../../fila/v1/EnqueueRequest'; + +export interface BatchEnqueueRequest { + 'messages'?: (_fila_v1_EnqueueRequest)[]; +} + +export interface BatchEnqueueRequest__Output { + 'messages': (_fila_v1_EnqueueRequest__Output)[]; +} diff --git a/generated/fila/v1/BatchEnqueueResponse.ts b/generated/fila/v1/BatchEnqueueResponse.ts new file mode 100644 index 0000000..0a876cb --- /dev/null +++ b/generated/fila/v1/BatchEnqueueResponse.ts @@ -0,0 +1,11 @@ +// Original file: proto/fila/v1/service.proto + +import type { BatchEnqueueResult as _fila_v1_BatchEnqueueResult, BatchEnqueueResult__Output as _fila_v1_BatchEnqueueResult__Output } from '../../fila/v1/BatchEnqueueResult'; + +export interface 
BatchEnqueueResponse { + 'results'?: (_fila_v1_BatchEnqueueResult)[]; +} + +export interface BatchEnqueueResponse__Output { + 'results': (_fila_v1_BatchEnqueueResult__Output)[]; +} diff --git a/generated/fila/v1/BatchEnqueueResult.ts b/generated/fila/v1/BatchEnqueueResult.ts new file mode 100644 index 0000000..9b08473 --- /dev/null +++ b/generated/fila/v1/BatchEnqueueResult.ts @@ -0,0 +1,15 @@ +// Original file: proto/fila/v1/service.proto + +import type { EnqueueResponse as _fila_v1_EnqueueResponse, EnqueueResponse__Output as _fila_v1_EnqueueResponse__Output } from '../../fila/v1/EnqueueResponse'; + +export interface BatchEnqueueResult { + 'success'?: (_fila_v1_EnqueueResponse | null); + 'error'?: (string); + 'result'?: "success"|"error"; +} + +export interface BatchEnqueueResult__Output { + 'success'?: (_fila_v1_EnqueueResponse__Output | null); + 'error'?: (string); + 'result'?: "success"|"error"; +} diff --git a/generated/fila/v1/ConsumeResponse.ts b/generated/fila/v1/ConsumeResponse.ts index 31c9e07..9296e26 100644 --- a/generated/fila/v1/ConsumeResponse.ts +++ b/generated/fila/v1/ConsumeResponse.ts @@ -4,8 +4,10 @@ import type { Message as _fila_v1_Message, Message__Output as _fila_v1_Message__ export interface ConsumeResponse { 'message'?: (_fila_v1_Message | null); + 'messages'?: (_fila_v1_Message)[]; } export interface ConsumeResponse__Output { 'message': (_fila_v1_Message__Output | null); + 'messages': (_fila_v1_Message__Output)[]; } diff --git a/generated/fila/v1/FilaService.ts b/generated/fila/v1/FilaService.ts index abfb6cf..9b23bad 100644 --- a/generated/fila/v1/FilaService.ts +++ b/generated/fila/v1/FilaService.ts @@ -4,6 +4,8 @@ import type * as grpc from '@grpc/grpc-js' import type { MethodDefinition } from '@grpc/proto-loader' import type { AckRequest as _fila_v1_AckRequest, AckRequest__Output as _fila_v1_AckRequest__Output } from '../../fila/v1/AckRequest'; import type { AckResponse as _fila_v1_AckResponse, AckResponse__Output as 
_fila_v1_AckResponse__Output } from '../../fila/v1/AckResponse'; +import type { BatchEnqueueRequest as _fila_v1_BatchEnqueueRequest, BatchEnqueueRequest__Output as _fila_v1_BatchEnqueueRequest__Output } from '../../fila/v1/BatchEnqueueRequest'; +import type { BatchEnqueueResponse as _fila_v1_BatchEnqueueResponse, BatchEnqueueResponse__Output as _fila_v1_BatchEnqueueResponse__Output } from '../../fila/v1/BatchEnqueueResponse'; import type { ConsumeRequest as _fila_v1_ConsumeRequest, ConsumeRequest__Output as _fila_v1_ConsumeRequest__Output } from '../../fila/v1/ConsumeRequest'; import type { ConsumeResponse as _fila_v1_ConsumeResponse, ConsumeResponse__Output as _fila_v1_ConsumeResponse__Output } from '../../fila/v1/ConsumeResponse'; import type { EnqueueRequest as _fila_v1_EnqueueRequest, EnqueueRequest__Output as _fila_v1_EnqueueRequest__Output } from '../../fila/v1/EnqueueRequest'; @@ -21,6 +23,15 @@ export interface FilaServiceClient extends grpc.Client { ack(argument: _fila_v1_AckRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_AckResponse__Output>): grpc.ClientUnaryCall; ack(argument: _fila_v1_AckRequest, callback: grpc.requestCallback<_fila_v1_AckResponse__Output>): grpc.ClientUnaryCall; + BatchEnqueue(argument: _fila_v1_BatchEnqueueRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_BatchEnqueueResponse__Output>): grpc.ClientUnaryCall; + BatchEnqueue(argument: _fila_v1_BatchEnqueueRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_fila_v1_BatchEnqueueResponse__Output>): grpc.ClientUnaryCall; + BatchEnqueue(argument: _fila_v1_BatchEnqueueRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_BatchEnqueueResponse__Output>): grpc.ClientUnaryCall; + BatchEnqueue(argument: _fila_v1_BatchEnqueueRequest, callback: grpc.requestCallback<_fila_v1_BatchEnqueueResponse__Output>): grpc.ClientUnaryCall; + batchEnqueue(argument: _fila_v1_BatchEnqueueRequest, 
metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_BatchEnqueueResponse__Output>): grpc.ClientUnaryCall; + batchEnqueue(argument: _fila_v1_BatchEnqueueRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_fila_v1_BatchEnqueueResponse__Output>): grpc.ClientUnaryCall; + batchEnqueue(argument: _fila_v1_BatchEnqueueRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_BatchEnqueueResponse__Output>): grpc.ClientUnaryCall; + batchEnqueue(argument: _fila_v1_BatchEnqueueRequest, callback: grpc.requestCallback<_fila_v1_BatchEnqueueResponse__Output>): grpc.ClientUnaryCall; + Consume(argument: _fila_v1_ConsumeRequest, metadata: grpc.Metadata, options?: grpc.CallOptions): grpc.ClientReadableStream<_fila_v1_ConsumeResponse__Output>; Consume(argument: _fila_v1_ConsumeRequest, options?: grpc.CallOptions): grpc.ClientReadableStream<_fila_v1_ConsumeResponse__Output>; consume(argument: _fila_v1_ConsumeRequest, metadata: grpc.Metadata, options?: grpc.CallOptions): grpc.ClientReadableStream<_fila_v1_ConsumeResponse__Output>; @@ -49,6 +60,8 @@ export interface FilaServiceClient extends grpc.Client { export interface FilaServiceHandlers extends grpc.UntypedServiceImplementation { Ack: grpc.handleUnaryCall<_fila_v1_AckRequest__Output, _fila_v1_AckResponse>; + BatchEnqueue: grpc.handleUnaryCall<_fila_v1_BatchEnqueueRequest__Output, _fila_v1_BatchEnqueueResponse>; + Consume: grpc.handleServerStreamingCall<_fila_v1_ConsumeRequest__Output, _fila_v1_ConsumeResponse>; Enqueue: grpc.handleUnaryCall<_fila_v1_EnqueueRequest__Output, _fila_v1_EnqueueResponse>; @@ -59,6 +72,7 @@ export interface FilaServiceHandlers extends grpc.UntypedServiceImplementation { export interface FilaServiceDefinition extends grpc.ServiceDefinition { Ack: MethodDefinition<_fila_v1_AckRequest, _fila_v1_AckResponse, _fila_v1_AckRequest__Output, _fila_v1_AckResponse__Output> + BatchEnqueue: MethodDefinition<_fila_v1_BatchEnqueueRequest, 
_fila_v1_BatchEnqueueResponse, _fila_v1_BatchEnqueueRequest__Output, _fila_v1_BatchEnqueueResponse__Output> Consume: MethodDefinition<_fila_v1_ConsumeRequest, _fila_v1_ConsumeResponse, _fila_v1_ConsumeRequest__Output, _fila_v1_ConsumeResponse__Output> Enqueue: MethodDefinition<_fila_v1_EnqueueRequest, _fila_v1_EnqueueResponse, _fila_v1_EnqueueRequest__Output, _fila_v1_EnqueueResponse__Output> Nack: MethodDefinition<_fila_v1_NackRequest, _fila_v1_NackResponse, _fila_v1_NackRequest__Output, _fila_v1_NackResponse__Output> diff --git a/generated/service.ts b/generated/service.ts index 7b41ec6..726d422 100644 --- a/generated/service.ts +++ b/generated/service.ts @@ -12,6 +12,9 @@ export interface ProtoGrpcType { v1: { AckRequest: MessageTypeDefinition AckResponse: MessageTypeDefinition + BatchEnqueueRequest: MessageTypeDefinition + BatchEnqueueResponse: MessageTypeDefinition + BatchEnqueueResult: MessageTypeDefinition ConsumeRequest: MessageTypeDefinition ConsumeResponse: MessageTypeDefinition EnqueueRequest: MessageTypeDefinition diff --git a/proto/fila/v1/service.proto b/proto/fila/v1/service.proto index f14fdd0..fc0f710 100644 --- a/proto/fila/v1/service.proto +++ b/proto/fila/v1/service.proto @@ -6,6 +6,7 @@ import "fila/v1/messages.proto"; // Hot-path RPCs for producers and consumers. 
service FilaService { rpc Enqueue(EnqueueRequest) returns (EnqueueResponse); + rpc BatchEnqueue(BatchEnqueueRequest) returns (BatchEnqueueResponse); rpc Consume(ConsumeRequest) returns (stream ConsumeResponse); rpc Ack(AckRequest) returns (AckResponse); rpc Nack(NackRequest) returns (NackResponse); @@ -26,7 +27,8 @@ message ConsumeRequest { } message ConsumeResponse { - Message message = 1; + Message message = 1; // Single message (backward compatible, used when batch size is 1) + repeated Message messages = 2; // Batched messages (populated when server sends multiple at once) } message AckRequest { @@ -43,3 +45,18 @@ message NackRequest { } message NackResponse {} + +message BatchEnqueueRequest { + repeated EnqueueRequest messages = 1; +} + +message BatchEnqueueResponse { + repeated BatchEnqueueResult results = 1; +} + +message BatchEnqueueResult { + oneof result { + EnqueueResponse success = 1; + string error = 2; + } +} diff --git a/src/batcher.ts b/src/batcher.ts new file mode 100644 index 0000000..c9b3586 --- /dev/null +++ b/src/batcher.ts @@ -0,0 +1,266 @@ +import * as grpc from "@grpc/grpc-js"; + +import { QueueNotFoundError, RPCError } from "./errors"; +import type { EnqueueMessage } from "./types"; +import type { FilaServiceClient } from "../generated/fila/v1/FilaService"; +import type { EnqueueResponse__Output } from "../generated/fila/v1/EnqueueResponse"; +import type { BatchEnqueueResponse__Output } from "../generated/fila/v1/BatchEnqueueResponse"; + +/** Controls how the SDK batches enqueue() calls. */ +export type BatchMode = + | { mode: "auto"; maxBatchSize?: number } + | { mode: "linger"; lingerMs: number; batchSize: number } + | { mode: "disabled" }; + +/** A queued enqueue item awaiting batch flush. 
*/ +interface BatchItem { + message: EnqueueMessage; + resolve: (messageId: string) => void; + reject: (err: Error) => void; +} + +function mapEnqueueError(err: grpc.ServiceError): Error { + if (err.code === grpc.status.NOT_FOUND) { + return new QueueNotFoundError(`enqueue: ${err.details}`); + } + return new RPCError(err.code, err.details); +} + +/** + * Background batcher that collects enqueue() calls and flushes them + * as batch RPCs. Supports auto (opportunistic) and linger (timer-based) modes. + */ +export class Batcher { + private readonly grpcClient: FilaServiceClient; + private readonly callMetadata: () => grpc.Metadata; + private readonly batchMode: BatchMode; + private readonly maxBatchSize: number; + + private pending: BatchItem[] = []; + private flushScheduled = false; + private closed = false; + private drainResolvers: Array<() => void> = []; + private lingerTimer: ReturnType | null = null; + + constructor( + grpcClient: FilaServiceClient, + callMetadata: () => grpc.Metadata, + batchMode: BatchMode + ) { + this.grpcClient = grpcClient; + this.callMetadata = callMetadata; + this.batchMode = batchMode; + + if (batchMode.mode === "auto") { + this.maxBatchSize = batchMode.maxBatchSize ?? 100; + } else if (batchMode.mode === "linger") { + this.maxBatchSize = batchMode.batchSize; + } else { + this.maxBatchSize = 1; + } + } + + /** + * Submit a message for batched enqueue. Returns a promise that resolves + * with the message ID when the batch containing this message is flushed. + */ + submit(message: EnqueueMessage): Promise { + if (this.closed) { + return Promise.reject( + new RPCError(grpc.status.UNAVAILABLE, "batcher is closed") + ); + } + + return new Promise((resolve, reject) => { + this.pending.push({ message, resolve, reject }); + this.scheduleFlush(); + }); + } + + /** + * Drain all pending messages before closing. Returns a promise that + * resolves when all pending messages have been flushed. 
+ */ + async drain(): Promise { + this.closed = true; + + if (this.lingerTimer !== null) { + clearTimeout(this.lingerTimer); + this.lingerTimer = null; + } + + if (this.pending.length === 0) { + return; + } + + return new Promise((resolve) => { + this.drainResolvers.push(resolve); + this.flushAll(); + }); + } + + private scheduleFlush(): void { + if (this.batchMode.mode === "auto") { + this.scheduleAutoFlush(); + } else if (this.batchMode.mode === "linger") { + this.scheduleLingerFlush(); + } + } + + /** + * Auto mode: schedule a flush via setImmediate. Messages that arrive + * within the same event loop turn will cluster into the same batch. + * At low load, each message is sent individually. At high load, + * messages naturally batch together. + */ + private scheduleAutoFlush(): void { + if (this.flushScheduled) return; + this.flushScheduled = true; + + setImmediate(() => { + this.flushScheduled = false; + this.flushAll(); + }); + } + + /** + * Linger mode: start a timer on the first message. Flush when the batch + * fills or the timer fires, whichever comes first. + */ + private scheduleLingerFlush(): void { + if (this.batchMode.mode !== "linger") return; + + // If batch is full, flush immediately. + if (this.pending.length >= this.batchMode.batchSize) { + if (this.lingerTimer !== null) { + clearTimeout(this.lingerTimer); + this.lingerTimer = null; + } + this.flushAll(); + return; + } + + // Start timer if not already running. + if (this.lingerTimer === null) { + this.lingerTimer = setTimeout(() => { + this.lingerTimer = null; + this.flushAll(); + }, this.batchMode.lingerMs); + } + } + + /** + * Flush all pending items, splitting into maxBatchSize chunks. + */ + private flushAll(): void { + while (this.pending.length > 0) { + const items = this.pending.splice(0, this.maxBatchSize); + // Fire-and-forget: flush concurrently. + this.flushBatch(items).then(() => { + this.notifyDrainComplete(); + }); + } + // Also check drain in case pending was already empty. 
+ this.notifyDrainComplete(); + } + + private notifyDrainComplete(): void { + if (this.pending.length === 0 && this.drainResolvers.length > 0) { + const resolvers = this.drainResolvers.splice(0); + for (const resolve of resolvers) { + resolve(); + } + } + } + + /** + * Flush a batch of items. Single item uses Enqueue RPC (preserves error + * types like QueueNotFoundError). Multiple items use BatchEnqueue. + */ + private async flushBatch(items: BatchItem[]): Promise { + if (items.length === 0) return; + + if (items.length === 1) { + return this.flushSingle(items[0]); + } + + return this.flushMultiple(items); + } + + /** Flush a single item via the regular Enqueue RPC. */ + private flushSingle(item: BatchItem): Promise { + return new Promise((resolve) => { + this.grpcClient.enqueue( + { + queue: item.message.queue, + headers: item.message.headers, + payload: item.message.payload, + }, + this.callMetadata(), + (err: grpc.ServiceError | null, resp?: EnqueueResponse__Output) => { + if (err) { + item.reject(mapEnqueueError(err)); + } else { + item.resolve(resp!.messageId); + } + resolve(); + } + ); + }); + } + + /** Flush multiple items via the BatchEnqueue RPC. */ + private flushMultiple(items: BatchItem[]): Promise { + const messages = items.map((item) => ({ + queue: item.message.queue, + headers: item.message.headers, + payload: item.message.payload, + })); + + return new Promise((resolve) => { + this.grpcClient.batchEnqueue( + { messages }, + this.callMetadata(), + ( + err: grpc.ServiceError | null, + resp?: BatchEnqueueResponse__Output + ) => { + if (err) { + // Transport-level failure: all items get the error. 
+ const mapped = new RPCError(err.code, err.details); + for (const item of items) { + item.reject(mapped); + } + } else { + const results = resp!.results; + for (let i = 0; i < items.length; i++) { + const result = results[i]; + if (!result) { + items[i].reject( + new RPCError( + grpc.status.INTERNAL, + "server returned fewer results than messages sent" + ) + ); + continue; + } + if (result.result === "success" && result.success) { + items[i].resolve(result.success.messageId!); + } else if (result.result === "error" && result.error) { + items[i].reject( + new RPCError(grpc.status.INTERNAL, result.error) + ); + } else { + items[i].reject( + new RPCError(grpc.status.INTERNAL, "no result from server") + ); + } + } + } + resolve(); + } + ); + }); + } + +} diff --git a/src/client.ts b/src/client.ts index 0e6b51d..ad4b4a6 100644 --- a/src/client.ts +++ b/src/client.ts @@ -9,10 +9,11 @@ import { QueueNotFoundError, RPCError, } from "./errors"; -import type { ConsumeMessage } from "./types"; +import type { ConsumeMessage, EnqueueMessage, BatchEnqueueResult } from "./types"; import type { FilaServiceClient } from "../generated/fila/v1/FilaService"; import type { EnqueueResponse__Output } from "../generated/fila/v1/EnqueueResponse"; import type { ConsumeResponse__Output } from "../generated/fila/v1/ConsumeResponse"; +import { Batcher, type BatchMode } from "./batcher"; function resolveProtoDir(): string { // Source (dev/test): __dirname = /src/ @@ -98,7 +99,51 @@ function mapNackError(err: grpc.ServiceError): FilaError { return new RPCError(err.code, err.details); } -/** Connection options for TLS and authentication. */ +/** Map a ConsumeResponse to ConsumeMessage(s), skipping keepalive frames. */ +function mapConsumeResponse( + resp: ConsumeResponse__Output +): ConsumeMessage[] { + // Prefer the batched `messages` field when non-empty. 
+ if (resp.messages && resp.messages.length > 0) { + const results: ConsumeMessage[] = []; + for (const msg of resp.messages) { + if (!msg || !msg.id) continue; + const metadata = msg.metadata; + results.push({ + id: msg.id, + headers: msg.headers ?? {}, + payload: Buffer.isBuffer(msg.payload) + ? msg.payload + : Buffer.from(msg.payload ?? ""), + fairnessKey: metadata?.fairnessKey ?? "", + attemptCount: metadata?.attemptCount ?? 0, + queue: metadata?.queueId ?? "", + }); + } + return results; + } + + // Fall back to singular `message` field (backward compatible). + const msg = resp.message; + if (!msg || !msg.id) { + return []; // keepalive frame + } + const metadata = msg.metadata; + return [ + { + id: msg.id, + headers: msg.headers ?? {}, + payload: Buffer.isBuffer(msg.payload) + ? msg.payload + : Buffer.from(msg.payload ?? ""), + fairnessKey: metadata?.fairnessKey ?? "", + attemptCount: metadata?.attemptCount ?? 0, + queue: metadata?.queueId ?? "", + }, + ]; +} + +/** Connection options for TLS, authentication, and batching. */ export interface ClientOptions { /** * Enable TLS using the OS system trust store for server verification. @@ -114,12 +159,31 @@ export interface ClientOptions { clientKey?: Buffer; /** API key for authentication. Sent as `authorization: Bearer ` metadata on every RPC. */ apiKey?: string; + /** + * Batch mode for enqueue() calls. + * + * - `'auto'` (DEFAULT): Opportunistic batching via setImmediate. Zero config, + * zero latency penalty at low load. Messages cluster naturally at high load. + * - `'linger'`: Timer-based batching with explicit `lingerMs` and `batchSize`. + * - `'disabled'`: No batching. Each enqueue() is a direct RPC. + * + * @default 'auto' + */ + batchMode?: "auto" | "linger" | "disabled"; + /** Maximum batch size for auto mode. Default: 100. */ + maxBatchSize?: number; + /** Linger time in milliseconds for linger mode. Required when batchMode is 'linger'. 
*/ + lingerMs?: number; + /** Maximum messages per batch for linger mode. Required when batchMode is 'linger'. */ + batchSize?: number; } /** * Client for the Fila message broker. * * Wraps the hot-path gRPC operations: enqueue, consume, ack, nack. + * By default, enqueue() calls are automatically batched for optimal throughput + * with zero added latency at low load. * * @example * ```typescript @@ -128,18 +192,19 @@ export interface ClientOptions { * for await (const msg of client.consume("my-queue")) { * await client.ack("my-queue", msg.id); * } - * client.close(); + * await client.close(); * ``` */ export class Client { private readonly grpcClient: FilaServiceClient; private readonly creds: grpc.ChannelCredentials; private readonly apiKey?: string; + private readonly batcher: Batcher | null; /** * Connect to a Fila broker at the given address. * @param addr - Broker address in "host:port" format (e.g., "localhost:5555"). - * @param options - Optional TLS and authentication settings. + * @param options - Optional TLS, authentication, and batching settings. */ constructor(addr: string, options?: ClientOptions) { const hasClientCert = !!options?.clientCert; @@ -171,27 +236,34 @@ export class Client { this.grpcClient = createGrpcClient(addr, this.creds); this.apiKey = options?.apiKey; - } - /** Map a ConsumeResponse to a ConsumeMessage, or undefined for keepalive frames. */ - private static mapConsumeResponse( - resp: ConsumeResponse__Output - ): ConsumeMessage | undefined { - const msg = resp.message; - if (!msg || !msg.id) { - return undefined; // keepalive frame + // Initialize the batcher based on the configured mode. + const modeStr = options?.batchMode ?? 
"auto"; + if (modeStr === "disabled") { + this.batcher = null; + } else { + let batchMode: BatchMode; + if (modeStr === "linger") { + if (options?.lingerMs === undefined || options?.batchSize === undefined) { + throw new Error("lingerMs and batchSize are required when batchMode is 'linger'"); + } + batchMode = { + mode: "linger", + lingerMs: options.lingerMs, + batchSize: options.batchSize, + }; + } else { + batchMode = { + mode: "auto", + maxBatchSize: options?.maxBatchSize, + }; + } + this.batcher = new Batcher( + this.grpcClient, + () => this.callMetadata(), + batchMode + ); } - const metadata = msg.metadata; - return { - id: msg.id, - headers: msg.headers ?? {}, - payload: Buffer.isBuffer(msg.payload) - ? msg.payload - : Buffer.from(msg.payload ?? ""), - fairnessKey: metadata?.fairnessKey ?? "", - attemptCount: metadata?.attemptCount ?? 0, - queue: metadata?.queueId ?? "", - }; } /** Build gRPC metadata, attaching the API key if configured. */ @@ -203,13 +275,25 @@ export class Client { return md; } - /** Close the underlying gRPC channel. */ - close(): void { + /** + * Close the client, draining any pending batched messages first. + * Returns a promise that resolves when all pending messages have been + * flushed and the gRPC channel is closed. + */ + async close(): Promise { + if (this.batcher) { + await this.batcher.drain(); + } (this.grpcClient as unknown as grpc.Client).close(); } /** * Enqueue a message to the specified queue. + * + * When batching is enabled (default), the message is routed through the + * batcher. At low load, messages are sent individually. At high load, + * messages cluster naturally into BatchEnqueue RPCs. + * * @param queue - Target queue name. * @param headers - Optional message headers. * @param payload - Message payload bytes. @@ -222,6 +306,16 @@ export class Client { headers: Record | null, payload: Buffer ): Promise { + // Route through the batcher when enabled. 
+ if (this.batcher) { + return this.batcher.submit({ + queue, + headers: headers ?? {}, + payload, + }); + } + + // No batching: direct RPC. return new Promise((resolve, reject) => { this.grpcClient.enqueue( { queue, headers: headers ?? {}, payload }, @@ -237,11 +331,75 @@ export class Client { }); } + /** + * Enqueue a batch of messages in a single RPC call. + * + * Each message is independently validated and processed. A failed message + * does not affect the others in the batch. Returns one result per input + * message, in the same order. + * + * This is more efficient than calling enqueue() in a loop because it + * amortizes the RPC overhead across all messages. + * + * @param messages - Array of messages to enqueue. + * @returns Per-message results (success with messageId, or error with description). + * @throws {RPCError} For transport-level failures affecting the entire batch. + */ + batchEnqueue(messages: EnqueueMessage[]): Promise { + // batchEnqueue always bypasses the batcher and uses a direct RPC. + // Create a temporary batcher-like object to reuse the RPC logic, + // or just call the gRPC client directly. + return this.doBatchEnqueue(messages); + } + + private doBatchEnqueue(messages: EnqueueMessage[]): Promise { + const protoMessages = messages.map((m) => ({ + queue: m.queue, + headers: m.headers, + payload: m.payload, + })); + + return new Promise((resolve, reject) => { + this.grpcClient.batchEnqueue( + { messages: protoMessages }, + this.callMetadata(), + (err: grpc.ServiceError | null, resp?) 
=> { + if (err) { + reject(new RPCError(err.code, err.details)); + return; + } + + const results: BatchEnqueueResult[] = resp!.results.map( + (r: { result?: string; success?: { messageId?: string } | null; error?: string }) => { + if (r.result === "success" && r.success) { + return { + success: true as const, + messageId: r.success.messageId!, + }; + } else if (r.result === "error" && r.error) { + return { success: false as const, error: r.error }; + } else { + return { + success: false as const, + error: "no result from server", + }; + } + } + ); + + resolve(results); + } + ); + }); + } + /** * Open a streaming consumer on the specified queue. * * Returns an async iterable that yields messages as they become available. * Nil message frames (keepalive signals) are skipped automatically. + * Batched delivery frames (multiple messages per ConsumeResponse) are + * transparently unpacked into individual messages. * * If the server returns UNAVAILABLE with an `x-fila-leader-addr` metadata * header, the client transparently reconnects to the leader node and retries @@ -268,8 +426,10 @@ export class Client { try { for await (const resp of iterable) { - const mapped = Client.mapConsumeResponse(resp); - if (mapped) yield mapped; + const messages = mapConsumeResponse(resp); + for (const msg of messages) { + yield msg; + } } } catch (err) { const svcErr = err as grpc.ServiceError; @@ -289,8 +449,10 @@ export class Client { leaderStream as AsyncIterable; try { for await (const resp of leaderIterable) { - const mapped = Client.mapConsumeResponse(resp); - if (mapped) yield mapped; + const messages = mapConsumeResponse(resp); + for (const msg of messages) { + yield msg; + } } } catch (retryErr) { const retrySvcErr = retryErr as grpc.ServiceError; diff --git a/src/index.ts b/src/index.ts index e707b0f..e7a3302 100644 --- a/src/index.ts +++ b/src/index.ts @@ -1,6 +1,6 @@ export { Client } from "./client"; export type { ClientOptions } from "./client"; -export type { ConsumeMessage 
} from "./types"; +export type { ConsumeMessage, EnqueueMessage, BatchEnqueueResult } from "./types"; export { FilaError, QueueNotFoundError, diff --git a/src/types.ts b/src/types.ts index 4010bf3..cb9fd4b 100644 --- a/src/types.ts +++ b/src/types.ts @@ -13,3 +13,18 @@ export interface ConsumeMessage { /** Queue the message belongs to. */ queue: string; } + +/** A single message specification for enqueue operations. */ +export interface EnqueueMessage { + /** Target queue name. */ + queue: string; + /** Message headers (key-value pairs). */ + headers: Record; + /** Message payload bytes. */ + payload: Buffer; +} + +/** The result of a single message within a batch enqueue call. */ +export type BatchEnqueueResult = + | { success: true; messageId: string } + | { success: false; error: string }; diff --git a/test/auth.test.ts b/test/auth.test.ts index 98991d9..8ee6732 100644 --- a/test/auth.test.ts +++ b/test/auth.test.ts @@ -39,7 +39,7 @@ describe.skipIf(!FILA_SERVER_AVAILABLE)("TLS + API key auth", () => { ); expect(msgId).toBeTruthy(); } finally { - client.close(); + await client.close(); } }); @@ -55,7 +55,7 @@ describe.skipIf(!FILA_SERVER_AVAILABLE)("TLS + API key auth", () => { return true; }); } finally { - client.close(); + await client.close(); } }); @@ -67,7 +67,7 @@ describe.skipIf(!FILA_SERVER_AVAILABLE)("TLS + API key auth", () => { client.enqueue("auth-test-badkey", null, Buffer.from("fail")) ).rejects.toThrow(RPCError); } finally { - client.close(); + await client.close(); } }); @@ -86,7 +86,7 @@ describe.skipIf(!FILA_SERVER_AVAILABLE)("TLS + API key auth", () => { } expect(received).toBe(true); } finally { - client.close(); + await client.close(); } }); }); @@ -131,7 +131,7 @@ describe.skipIf(!FILA_SERVER_AVAILABLE)("TLS + API key auth", () => { ); expect(msgId).toBeTruthy(); } finally { - client.close(); + await client.close(); } }); @@ -143,7 +143,7 @@ describe.skipIf(!FILA_SERVER_AVAILABLE)("TLS + API key auth", () => { 
client.enqueue("tls-test-insecure", null, Buffer.from("fail")) ).rejects.toThrow(); } finally { - client.close(); + await client.close(); } }); }); @@ -199,7 +199,7 @@ describe.skipIf(!FILA_SERVER_AVAILABLE)("TLS + API key auth", () => { ); expect(msgId).toBeTruthy(); } finally { - client.close(); + await client.close(); } }); @@ -230,7 +230,7 @@ describe.skipIf(!FILA_SERVER_AVAILABLE)("TLS + API key auth", () => { } expect(received).toBe(true); } finally { - client.close(); + await client.close(); } }); }); diff --git a/test/batch.test.ts b/test/batch.test.ts new file mode 100644 index 0000000..ce47540 --- /dev/null +++ b/test/batch.test.ts @@ -0,0 +1,304 @@ +import { describe, it, expect, beforeAll, afterAll } from "vitest"; +import { Client } from "../src"; +import { QueueNotFoundError } from "../src/errors"; +import { + startTestServer, + FILA_SERVER_AVAILABLE, + type TestServer, +} from "./helpers"; + +describe.skipIf(!FILA_SERVER_AVAILABLE)("Batch operations", () => { + let server: TestServer; + + beforeAll(async () => { + server = await startTestServer(); + }); + + afterAll(() => { + server?.stop(); + }); + + describe("batchEnqueue", () => { + it("enqueues multiple messages in a single RPC", async () => { + await server.createQueue("batch-multi"); + + const client = new Client(server.addr, { batchMode: "disabled" }); + try { + const results = await client.batchEnqueue([ + { queue: "batch-multi", headers: { idx: "0" }, payload: Buffer.from("msg-0") }, + { queue: "batch-multi", headers: { idx: "1" }, payload: Buffer.from("msg-1") }, + { queue: "batch-multi", headers: { idx: "2" }, payload: Buffer.from("msg-2") }, + ]); + + expect(results).toHaveLength(3); + for (const result of results) { + expect(result.success).toBe(true); + if (result.success) { + expect(result.messageId).toBeTruthy(); + } + } + + // Verify all messages are consumable. 
+ const received: string[] = []; + let count = 0; + for await (const msg of client.consume("batch-multi")) { + received.push(msg.payload.toString()); + await client.ack("batch-multi", msg.id); + count++; + if (count >= 3) break; + } + expect(received).toContain("msg-0"); + expect(received).toContain("msg-1"); + expect(received).toContain("msg-2"); + } finally { + await client.close(); + } + }); + + it("returns per-message errors for nonexistent queues", async () => { + await server.createQueue("batch-partial"); + + const client = new Client(server.addr, { batchMode: "disabled" }); + try { + const results = await client.batchEnqueue([ + { queue: "batch-partial", headers: {}, payload: Buffer.from("ok") }, + { queue: "no-such-queue", headers: {}, payload: Buffer.from("fail") }, + ]); + + expect(results).toHaveLength(2); + expect(results[0].success).toBe(true); + expect(results[1].success).toBe(false); + if (!results[1].success) { + expect(results[1].error).toBeTruthy(); + } + } finally { + await client.close(); + } + }); + + it("returns message IDs in same order as input", async () => { + await server.createQueue("batch-order"); + + const client = new Client(server.addr, { batchMode: "disabled" }); + try { + const results = await client.batchEnqueue([ + { queue: "batch-order", headers: {}, payload: Buffer.from("first") }, + { queue: "batch-order", headers: {}, payload: Buffer.from("second") }, + ]); + + expect(results).toHaveLength(2); + expect(results[0].success).toBe(true); + expect(results[1].success).toBe(true); + if (results[0].success && results[1].success) { + expect(results[0].messageId).not.toBe(results[1].messageId); + } + } finally { + await client.close(); + } + }); + }); + + describe("auto batch mode (default)", () => { + it("enqueue works with default auto batching", async () => { + await server.createQueue("auto-batch"); + + const client = new Client(server.addr); + try { + const msgId = await client.enqueue( + "auto-batch", + { key: "value" }, + 
Buffer.from("batched-msg") + ); + expect(msgId).toBeTruthy(); + + let received = false; + for await (const msg of client.consume("auto-batch")) { + expect(msg.id).toBe(msgId); + expect(msg.headers).toEqual({ key: "value" }); + expect(msg.payload.toString()).toBe("batched-msg"); + await client.ack("auto-batch", msg.id); + received = true; + break; + } + expect(received).toBe(true); + } finally { + await client.close(); + } + }); + + it("multiple concurrent enqueues are batched together", async () => { + await server.createQueue("auto-concurrent"); + + const client = new Client(server.addr); + try { + // Fire multiple enqueues concurrently — they should batch together + // since they arrive within the same event loop turn. + const promises = Array.from({ length: 5 }, (_, i) => + client.enqueue( + "auto-concurrent", + { idx: String(i) }, + Buffer.from(`msg-${i}`) + ) + ); + + const messageIds = await Promise.all(promises); + expect(messageIds).toHaveLength(5); + for (const id of messageIds) { + expect(id).toBeTruthy(); + } + // All IDs should be unique. + expect(new Set(messageIds).size).toBe(5); + } finally { + await client.close(); + } + }); + + it("preserves QueueNotFoundError for single-item batches", async () => { + const client = new Client(server.addr); + try { + // Single message to nonexistent queue: should get QueueNotFoundError + // because single-item batches use Enqueue RPC. 
+ await expect( + client.enqueue("no-such-queue-auto", null, Buffer.from("fail")) + ).rejects.toThrow(QueueNotFoundError); + } finally { + await client.close(); + } + }); + }); + + describe("disabled batch mode", () => { + it("enqueue works with batching disabled", async () => { + await server.createQueue("no-batch"); + + const client = new Client(server.addr, { batchMode: "disabled" }); + try { + const msgId = await client.enqueue( + "no-batch", + null, + Buffer.from("direct") + ); + expect(msgId).toBeTruthy(); + + let received = false; + for await (const msg of client.consume("no-batch")) { + expect(msg.id).toBe(msgId); + expect(msg.payload.toString()).toBe("direct"); + await client.ack("no-batch", msg.id); + received = true; + break; + } + expect(received).toBe(true); + } finally { + await client.close(); + } + }); + + it("enqueue to nonexistent queue throws QueueNotFoundError", async () => { + const client = new Client(server.addr, { batchMode: "disabled" }); + try { + await expect( + client.enqueue("no-such-queue-disabled", null, Buffer.from("fail")) + ).rejects.toThrow(QueueNotFoundError); + } finally { + await client.close(); + } + }); + }); + + describe("linger batch mode", () => { + it("enqueue flushes after lingerMs timeout", async () => { + await server.createQueue("linger-batch"); + + const client = new Client(server.addr, { + batchMode: "linger", + lingerMs: 50, + batchSize: 100, + }); + try { + const msgId = await client.enqueue( + "linger-batch", + null, + Buffer.from("lingered") + ); + expect(msgId).toBeTruthy(); + } finally { + await client.close(); + } + }); + + it("enqueue flushes when batch size is reached", async () => { + await server.createQueue("linger-full"); + + const client = new Client(server.addr, { + batchMode: "linger", + lingerMs: 5000, // Long timer — batch should flush by size first. 
+ batchSize: 3, + }); + try { + const promises = Array.from({ length: 3 }, (_, i) => + client.enqueue( + "linger-full", + { idx: String(i) }, + Buffer.from(`msg-${i}`) + ) + ); + + const messageIds = await Promise.all(promises); + expect(messageIds).toHaveLength(3); + for (const id of messageIds) { + expect(id).toBeTruthy(); + } + } finally { + await client.close(); + } + }); + + it("constructor requires lingerMs and batchSize", () => { + expect( + () => new Client("localhost:5555", { batchMode: "linger" } as ClientOptions) + ).toThrow("lingerMs and batchSize are required"); + }); + }); + + describe("close() drains pending messages", () => { + it("close drains auto-batched messages before disconnecting", async () => { + await server.createQueue("close-drain"); + + const client = new Client(server.addr); + + // Enqueue a message and immediately close. + const enqueuePromise = client.enqueue( + "close-drain", + null, + Buffer.from("drained") + ); + + // Close should wait for pending messages. + await client.close(); + + // The enqueue should have completed before close returned. + const msgId = await enqueuePromise; + expect(msgId).toBeTruthy(); + + // Verify the message arrived at the server. + const verifyClient = new Client(server.addr, { batchMode: "disabled" }); + try { + let received = false; + for await (const msg of verifyClient.consume("close-drain")) { + expect(msg.id).toBe(msgId); + expect(msg.payload.toString()).toBe("drained"); + await verifyClient.ack("close-drain", msg.id); + received = true; + break; + } + expect(received).toBe(true); + } finally { + await verifyClient.close(); + } + }); + }); +}); + +// Import ClientOptions type for the constructor test. 
+import type { ClientOptions } from "../src/client"; diff --git a/test/batcher.unit.test.ts b/test/batcher.unit.test.ts new file mode 100644 index 0000000..20ae8e2 --- /dev/null +++ b/test/batcher.unit.test.ts @@ -0,0 +1,54 @@ +import { describe, it, expect } from "vitest"; +import { Client } from "../src"; + +describe("Batcher unit tests (no server)", () => { + it("default batch mode is auto", () => { + // Creating a client with default options should not throw. + // The batcher is initialized but won't do anything until enqueue is called. + const client = new Client("localhost:9999"); + // close() should succeed even without a real server (just closes channel). + client.close(); + }); + + it("disabled batch mode creates no batcher", () => { + const client = new Client("localhost:9999", { batchMode: "disabled" }); + client.close(); + }); + + it("auto batch mode with custom maxBatchSize", () => { + const client = new Client("localhost:9999", { + batchMode: "auto", + maxBatchSize: 50, + }); + client.close(); + }); + + it("linger mode requires lingerMs and batchSize", () => { + expect( + () => + new Client("localhost:9999", { + batchMode: "linger", + } as import("../src/client").ClientOptions) + ).toThrow("lingerMs and batchSize are required"); + }); + + it("linger mode accepts valid config", () => { + const client = new Client("localhost:9999", { + batchMode: "linger", + lingerMs: 10, + batchSize: 5, + }); + client.close(); + }); + + it("close() resolves immediately when no pending messages", async () => { + const client = new Client("localhost:9999"); + await client.close(); + // Should not hang. 
+ }); + + it("close() resolves immediately when batching is disabled", async () => { + const client = new Client("localhost:9999", { batchMode: "disabled" }); + await client.close(); + }); +}); diff --git a/test/client.test.ts b/test/client.test.ts index 7c21ba3..d546b28 100644 --- a/test/client.test.ts +++ b/test/client.test.ts @@ -45,7 +45,7 @@ describe.skipIf(!FILA_SERVER_AVAILABLE)("Client", () => { } expect(received).toBe(true); } finally { - client.close(); + await client.close(); } }); @@ -77,7 +77,7 @@ describe.skipIf(!FILA_SERVER_AVAILABLE)("Client", () => { } expect(deliveryCount).toBe(1); } finally { - client.close(); + await client.close(); } }); @@ -88,7 +88,7 @@ describe.skipIf(!FILA_SERVER_AVAILABLE)("Client", () => { client.enqueue("no-such-queue", null, Buffer.from("fail")) ).rejects.toThrow(QueueNotFoundError); } finally { - client.close(); + await client.close(); } }); }); From fe3484e982db15aee54624ce6f0c53640d204c41 Mon Sep 17 00:00:00 2001 From: Lucas Vieira Date: Wed, 25 Mar 2026 00:09:38 -0300 Subject: [PATCH 6/8] feat: unified api surface from story 30.1 - update service.proto: single Enqueue RPC with repeated messages, typed per-result errors for enqueue/ack/nack, consume uses only repeated messages field - remove batchEnqueue(), add enqueueMany() (no "batch" prefix) - enqueue() wraps single message in repeated, parses first result - ack()/nack() wrap single item in repeated, parse per-result errors - consume() uses only messages field (singular message field removed) - batcher uses unified Enqueue RPC for all batch sizes - fix drain race: track in-flight RPCs to prevent premature resolution - regenerate proto types, remove stale BatchEnqueue* generated files --- generated/fila/v1/AckError.ts | 13 ++ generated/fila/v1/AckErrorCode.ts | 20 +++ generated/fila/v1/AckMessage.ts | 12 ++ generated/fila/v1/AckRequest.ts | 7 +- generated/fila/v1/AckResponse.ts | 3 + generated/fila/v1/AckResult.ts | 16 ++ generated/fila/v1/AckSuccess.ts | 8 + 
generated/fila/v1/BatchEnqueueRequest.ts | 11 -- generated/fila/v1/BatchEnqueueResponse.ts | 11 -- generated/fila/v1/BatchEnqueueResult.ts | 15 -- generated/fila/v1/ConsumeResponse.ts | 2 - generated/fila/v1/EnqueueError.ts | 13 ++ generated/fila/v1/EnqueueErrorCode.ts | 23 +++ generated/fila/v1/EnqueueMessage.ts | 14 ++ generated/fila/v1/EnqueueRequest.ts | 9 +- generated/fila/v1/EnqueueResponse.ts | 5 +- generated/fila/v1/EnqueueResult.ts | 15 ++ generated/fila/v1/FilaService.ts | 24 ++- generated/fila/v1/NackError.ts | 13 ++ generated/fila/v1/NackErrorCode.ts | 20 +++ generated/fila/v1/NackMessage.ts | 14 ++ generated/fila/v1/NackRequest.ts | 9 +- generated/fila/v1/NackResponse.ts | 3 + generated/fila/v1/NackResult.ts | 16 ++ generated/fila/v1/NackSuccess.ts | 8 + generated/fila/v1/StreamEnqueueRequest.ts | 14 ++ generated/fila/v1/StreamEnqueueResponse.ts | 14 ++ generated/service.ts | 21 ++- proto/fila/v1/service.proto | 116 ++++++++++-- src/batcher.ts | 80 +++------ src/client.ts | 196 ++++++++++++--------- src/index.ts | 2 +- src/types.ts | 4 +- test/batch.test.ts | 34 ++-- 34 files changed, 536 insertions(+), 249 deletions(-) create mode 100644 generated/fila/v1/AckError.ts create mode 100644 generated/fila/v1/AckErrorCode.ts create mode 100644 generated/fila/v1/AckMessage.ts create mode 100644 generated/fila/v1/AckResult.ts create mode 100644 generated/fila/v1/AckSuccess.ts delete mode 100644 generated/fila/v1/BatchEnqueueRequest.ts delete mode 100644 generated/fila/v1/BatchEnqueueResponse.ts delete mode 100644 generated/fila/v1/BatchEnqueueResult.ts create mode 100644 generated/fila/v1/EnqueueError.ts create mode 100644 generated/fila/v1/EnqueueErrorCode.ts create mode 100644 generated/fila/v1/EnqueueMessage.ts create mode 100644 generated/fila/v1/EnqueueResult.ts create mode 100644 generated/fila/v1/NackError.ts create mode 100644 generated/fila/v1/NackErrorCode.ts create mode 100644 generated/fila/v1/NackMessage.ts create mode 100644 
generated/fila/v1/NackResult.ts create mode 100644 generated/fila/v1/NackSuccess.ts create mode 100644 generated/fila/v1/StreamEnqueueRequest.ts create mode 100644 generated/fila/v1/StreamEnqueueResponse.ts diff --git a/generated/fila/v1/AckError.ts b/generated/fila/v1/AckError.ts new file mode 100644 index 0000000..bb2f56b --- /dev/null +++ b/generated/fila/v1/AckError.ts @@ -0,0 +1,13 @@ +// Original file: proto/fila/v1/service.proto + +import type { AckErrorCode as _fila_v1_AckErrorCode, AckErrorCode__Output as _fila_v1_AckErrorCode__Output } from '../../fila/v1/AckErrorCode'; + +export interface AckError { + 'code'?: (_fila_v1_AckErrorCode); + 'message'?: (string); +} + +export interface AckError__Output { + 'code': (_fila_v1_AckErrorCode__Output); + 'message': (string); +} diff --git a/generated/fila/v1/AckErrorCode.ts b/generated/fila/v1/AckErrorCode.ts new file mode 100644 index 0000000..04a2113 --- /dev/null +++ b/generated/fila/v1/AckErrorCode.ts @@ -0,0 +1,20 @@ +// Original file: proto/fila/v1/service.proto + +export const AckErrorCode = { + ACK_ERROR_CODE_UNSPECIFIED: 'ACK_ERROR_CODE_UNSPECIFIED', + ACK_ERROR_CODE_MESSAGE_NOT_FOUND: 'ACK_ERROR_CODE_MESSAGE_NOT_FOUND', + ACK_ERROR_CODE_STORAGE: 'ACK_ERROR_CODE_STORAGE', + ACK_ERROR_CODE_PERMISSION_DENIED: 'ACK_ERROR_CODE_PERMISSION_DENIED', +} as const; + +export type AckErrorCode = + | 'ACK_ERROR_CODE_UNSPECIFIED' + | 0 + | 'ACK_ERROR_CODE_MESSAGE_NOT_FOUND' + | 1 + | 'ACK_ERROR_CODE_STORAGE' + | 2 + | 'ACK_ERROR_CODE_PERMISSION_DENIED' + | 3 + +export type AckErrorCode__Output = typeof AckErrorCode[keyof typeof AckErrorCode] diff --git a/generated/fila/v1/AckMessage.ts b/generated/fila/v1/AckMessage.ts new file mode 100644 index 0000000..dac5ff4 --- /dev/null +++ b/generated/fila/v1/AckMessage.ts @@ -0,0 +1,12 @@ +// Original file: proto/fila/v1/service.proto + + +export interface AckMessage { + 'queue'?: (string); + 'messageId'?: (string); +} + +export interface AckMessage__Output { + 'queue': 
(string); + 'messageId': (string); +} diff --git a/generated/fila/v1/AckRequest.ts b/generated/fila/v1/AckRequest.ts index 53f7b92..3c63ce1 100644 --- a/generated/fila/v1/AckRequest.ts +++ b/generated/fila/v1/AckRequest.ts @@ -1,12 +1,11 @@ // Original file: proto/fila/v1/service.proto +import type { AckMessage as _fila_v1_AckMessage, AckMessage__Output as _fila_v1_AckMessage__Output } from '../../fila/v1/AckMessage'; export interface AckRequest { - 'queue'?: (string); - 'messageId'?: (string); + 'messages'?: (_fila_v1_AckMessage)[]; } export interface AckRequest__Output { - 'queue': (string); - 'messageId': (string); + 'messages': (_fila_v1_AckMessage__Output)[]; } diff --git a/generated/fila/v1/AckResponse.ts b/generated/fila/v1/AckResponse.ts index 9829f92..eae1240 100644 --- a/generated/fila/v1/AckResponse.ts +++ b/generated/fila/v1/AckResponse.ts @@ -1,8 +1,11 @@ // Original file: proto/fila/v1/service.proto +import type { AckResult as _fila_v1_AckResult, AckResult__Output as _fila_v1_AckResult__Output } from '../../fila/v1/AckResult'; export interface AckResponse { + 'results'?: (_fila_v1_AckResult)[]; } export interface AckResponse__Output { + 'results': (_fila_v1_AckResult__Output)[]; } diff --git a/generated/fila/v1/AckResult.ts b/generated/fila/v1/AckResult.ts new file mode 100644 index 0000000..d503b9a --- /dev/null +++ b/generated/fila/v1/AckResult.ts @@ -0,0 +1,16 @@ +// Original file: proto/fila/v1/service.proto + +import type { AckSuccess as _fila_v1_AckSuccess, AckSuccess__Output as _fila_v1_AckSuccess__Output } from '../../fila/v1/AckSuccess'; +import type { AckError as _fila_v1_AckError, AckError__Output as _fila_v1_AckError__Output } from '../../fila/v1/AckError'; + +export interface AckResult { + 'success'?: (_fila_v1_AckSuccess | null); + 'error'?: (_fila_v1_AckError | null); + 'result'?: "success"|"error"; +} + +export interface AckResult__Output { + 'success'?: (_fila_v1_AckSuccess__Output | null); + 'error'?: (_fila_v1_AckError__Output | 
null); + 'result'?: "success"|"error"; +} diff --git a/generated/fila/v1/AckSuccess.ts b/generated/fila/v1/AckSuccess.ts new file mode 100644 index 0000000..673d632 --- /dev/null +++ b/generated/fila/v1/AckSuccess.ts @@ -0,0 +1,8 @@ +// Original file: proto/fila/v1/service.proto + + +export interface AckSuccess { +} + +export interface AckSuccess__Output { +} diff --git a/generated/fila/v1/BatchEnqueueRequest.ts b/generated/fila/v1/BatchEnqueueRequest.ts deleted file mode 100644 index afd9d70..0000000 --- a/generated/fila/v1/BatchEnqueueRequest.ts +++ /dev/null @@ -1,11 +0,0 @@ -// Original file: proto/fila/v1/service.proto - -import type { EnqueueRequest as _fila_v1_EnqueueRequest, EnqueueRequest__Output as _fila_v1_EnqueueRequest__Output } from '../../fila/v1/EnqueueRequest'; - -export interface BatchEnqueueRequest { - 'messages'?: (_fila_v1_EnqueueRequest)[]; -} - -export interface BatchEnqueueRequest__Output { - 'messages': (_fila_v1_EnqueueRequest__Output)[]; -} diff --git a/generated/fila/v1/BatchEnqueueResponse.ts b/generated/fila/v1/BatchEnqueueResponse.ts deleted file mode 100644 index 0a876cb..0000000 --- a/generated/fila/v1/BatchEnqueueResponse.ts +++ /dev/null @@ -1,11 +0,0 @@ -// Original file: proto/fila/v1/service.proto - -import type { BatchEnqueueResult as _fila_v1_BatchEnqueueResult, BatchEnqueueResult__Output as _fila_v1_BatchEnqueueResult__Output } from '../../fila/v1/BatchEnqueueResult'; - -export interface BatchEnqueueResponse { - 'results'?: (_fila_v1_BatchEnqueueResult)[]; -} - -export interface BatchEnqueueResponse__Output { - 'results': (_fila_v1_BatchEnqueueResult__Output)[]; -} diff --git a/generated/fila/v1/BatchEnqueueResult.ts b/generated/fila/v1/BatchEnqueueResult.ts deleted file mode 100644 index 9b08473..0000000 --- a/generated/fila/v1/BatchEnqueueResult.ts +++ /dev/null @@ -1,15 +0,0 @@ -// Original file: proto/fila/v1/service.proto - -import type { EnqueueResponse as _fila_v1_EnqueueResponse, EnqueueResponse__Output as 
_fila_v1_EnqueueResponse__Output } from '../../fila/v1/EnqueueResponse'; - -export interface BatchEnqueueResult { - 'success'?: (_fila_v1_EnqueueResponse | null); - 'error'?: (string); - 'result'?: "success"|"error"; -} - -export interface BatchEnqueueResult__Output { - 'success'?: (_fila_v1_EnqueueResponse__Output | null); - 'error'?: (string); - 'result'?: "success"|"error"; -} diff --git a/generated/fila/v1/ConsumeResponse.ts b/generated/fila/v1/ConsumeResponse.ts index 9296e26..bc1f398 100644 --- a/generated/fila/v1/ConsumeResponse.ts +++ b/generated/fila/v1/ConsumeResponse.ts @@ -3,11 +3,9 @@ import type { Message as _fila_v1_Message, Message__Output as _fila_v1_Message__Output } from '../../fila/v1/Message'; export interface ConsumeResponse { - 'message'?: (_fila_v1_Message | null); 'messages'?: (_fila_v1_Message)[]; } export interface ConsumeResponse__Output { - 'message': (_fila_v1_Message__Output | null); 'messages': (_fila_v1_Message__Output)[]; } diff --git a/generated/fila/v1/EnqueueError.ts b/generated/fila/v1/EnqueueError.ts new file mode 100644 index 0000000..82fe40e --- /dev/null +++ b/generated/fila/v1/EnqueueError.ts @@ -0,0 +1,13 @@ +// Original file: proto/fila/v1/service.proto + +import type { EnqueueErrorCode as _fila_v1_EnqueueErrorCode, EnqueueErrorCode__Output as _fila_v1_EnqueueErrorCode__Output } from '../../fila/v1/EnqueueErrorCode'; + +export interface EnqueueError { + 'code'?: (_fila_v1_EnqueueErrorCode); + 'message'?: (string); +} + +export interface EnqueueError__Output { + 'code': (_fila_v1_EnqueueErrorCode__Output); + 'message': (string); +} diff --git a/generated/fila/v1/EnqueueErrorCode.ts b/generated/fila/v1/EnqueueErrorCode.ts new file mode 100644 index 0000000..cc0f38d --- /dev/null +++ b/generated/fila/v1/EnqueueErrorCode.ts @@ -0,0 +1,23 @@ +// Original file: proto/fila/v1/service.proto + +export const EnqueueErrorCode = { + ENQUEUE_ERROR_CODE_UNSPECIFIED: 'ENQUEUE_ERROR_CODE_UNSPECIFIED', + 
ENQUEUE_ERROR_CODE_QUEUE_NOT_FOUND: 'ENQUEUE_ERROR_CODE_QUEUE_NOT_FOUND', + ENQUEUE_ERROR_CODE_STORAGE: 'ENQUEUE_ERROR_CODE_STORAGE', + ENQUEUE_ERROR_CODE_LUA: 'ENQUEUE_ERROR_CODE_LUA', + ENQUEUE_ERROR_CODE_PERMISSION_DENIED: 'ENQUEUE_ERROR_CODE_PERMISSION_DENIED', +} as const; + +export type EnqueueErrorCode = + | 'ENQUEUE_ERROR_CODE_UNSPECIFIED' + | 0 + | 'ENQUEUE_ERROR_CODE_QUEUE_NOT_FOUND' + | 1 + | 'ENQUEUE_ERROR_CODE_STORAGE' + | 2 + | 'ENQUEUE_ERROR_CODE_LUA' + | 3 + | 'ENQUEUE_ERROR_CODE_PERMISSION_DENIED' + | 4 + +export type EnqueueErrorCode__Output = typeof EnqueueErrorCode[keyof typeof EnqueueErrorCode] diff --git a/generated/fila/v1/EnqueueMessage.ts b/generated/fila/v1/EnqueueMessage.ts new file mode 100644 index 0000000..04b917d --- /dev/null +++ b/generated/fila/v1/EnqueueMessage.ts @@ -0,0 +1,14 @@ +// Original file: proto/fila/v1/service.proto + + +export interface EnqueueMessage { + 'queue'?: (string); + 'headers'?: ({[key: string]: string}); + 'payload'?: (Buffer | Uint8Array | string); +} + +export interface EnqueueMessage__Output { + 'queue': (string); + 'headers': ({[key: string]: string}); + 'payload': (Buffer); +} diff --git a/generated/fila/v1/EnqueueRequest.ts b/generated/fila/v1/EnqueueRequest.ts index 2ccd490..e6f0c8e 100644 --- a/generated/fila/v1/EnqueueRequest.ts +++ b/generated/fila/v1/EnqueueRequest.ts @@ -1,14 +1,11 @@ // Original file: proto/fila/v1/service.proto +import type { EnqueueMessage as _fila_v1_EnqueueMessage, EnqueueMessage__Output as _fila_v1_EnqueueMessage__Output } from '../../fila/v1/EnqueueMessage'; export interface EnqueueRequest { - 'queue'?: (string); - 'headers'?: ({[key: string]: string}); - 'payload'?: (Buffer | Uint8Array | string); + 'messages'?: (_fila_v1_EnqueueMessage)[]; } export interface EnqueueRequest__Output { - 'queue': (string); - 'headers': ({[key: string]: string}); - 'payload': (Buffer); + 'messages': (_fila_v1_EnqueueMessage__Output)[]; } diff --git a/generated/fila/v1/EnqueueResponse.ts 
b/generated/fila/v1/EnqueueResponse.ts index a44e5ef..271c440 100644 --- a/generated/fila/v1/EnqueueResponse.ts +++ b/generated/fila/v1/EnqueueResponse.ts @@ -1,10 +1,11 @@ // Original file: proto/fila/v1/service.proto +import type { EnqueueResult as _fila_v1_EnqueueResult, EnqueueResult__Output as _fila_v1_EnqueueResult__Output } from '../../fila/v1/EnqueueResult'; export interface EnqueueResponse { - 'messageId'?: (string); + 'results'?: (_fila_v1_EnqueueResult)[]; } export interface EnqueueResponse__Output { - 'messageId': (string); + 'results': (_fila_v1_EnqueueResult__Output)[]; } diff --git a/generated/fila/v1/EnqueueResult.ts b/generated/fila/v1/EnqueueResult.ts new file mode 100644 index 0000000..3691008 --- /dev/null +++ b/generated/fila/v1/EnqueueResult.ts @@ -0,0 +1,15 @@ +// Original file: proto/fila/v1/service.proto + +import type { EnqueueError as _fila_v1_EnqueueError, EnqueueError__Output as _fila_v1_EnqueueError__Output } from '../../fila/v1/EnqueueError'; + +export interface EnqueueResult { + 'messageId'?: (string); + 'error'?: (_fila_v1_EnqueueError | null); + 'result'?: "messageId"|"error"; +} + +export interface EnqueueResult__Output { + 'messageId'?: (string); + 'error'?: (_fila_v1_EnqueueError__Output | null); + 'result'?: "messageId"|"error"; +} diff --git a/generated/fila/v1/FilaService.ts b/generated/fila/v1/FilaService.ts index 9b23bad..868ec1b 100644 --- a/generated/fila/v1/FilaService.ts +++ b/generated/fila/v1/FilaService.ts @@ -4,14 +4,14 @@ import type * as grpc from '@grpc/grpc-js' import type { MethodDefinition } from '@grpc/proto-loader' import type { AckRequest as _fila_v1_AckRequest, AckRequest__Output as _fila_v1_AckRequest__Output } from '../../fila/v1/AckRequest'; import type { AckResponse as _fila_v1_AckResponse, AckResponse__Output as _fila_v1_AckResponse__Output } from '../../fila/v1/AckResponse'; -import type { BatchEnqueueRequest as _fila_v1_BatchEnqueueRequest, BatchEnqueueRequest__Output as 
_fila_v1_BatchEnqueueRequest__Output } from '../../fila/v1/BatchEnqueueRequest'; -import type { BatchEnqueueResponse as _fila_v1_BatchEnqueueResponse, BatchEnqueueResponse__Output as _fila_v1_BatchEnqueueResponse__Output } from '../../fila/v1/BatchEnqueueResponse'; import type { ConsumeRequest as _fila_v1_ConsumeRequest, ConsumeRequest__Output as _fila_v1_ConsumeRequest__Output } from '../../fila/v1/ConsumeRequest'; import type { ConsumeResponse as _fila_v1_ConsumeResponse, ConsumeResponse__Output as _fila_v1_ConsumeResponse__Output } from '../../fila/v1/ConsumeResponse'; import type { EnqueueRequest as _fila_v1_EnqueueRequest, EnqueueRequest__Output as _fila_v1_EnqueueRequest__Output } from '../../fila/v1/EnqueueRequest'; import type { EnqueueResponse as _fila_v1_EnqueueResponse, EnqueueResponse__Output as _fila_v1_EnqueueResponse__Output } from '../../fila/v1/EnqueueResponse'; import type { NackRequest as _fila_v1_NackRequest, NackRequest__Output as _fila_v1_NackRequest__Output } from '../../fila/v1/NackRequest'; import type { NackResponse as _fila_v1_NackResponse, NackResponse__Output as _fila_v1_NackResponse__Output } from '../../fila/v1/NackResponse'; +import type { StreamEnqueueRequest as _fila_v1_StreamEnqueueRequest, StreamEnqueueRequest__Output as _fila_v1_StreamEnqueueRequest__Output } from '../../fila/v1/StreamEnqueueRequest'; +import type { StreamEnqueueResponse as _fila_v1_StreamEnqueueResponse, StreamEnqueueResponse__Output as _fila_v1_StreamEnqueueResponse__Output } from '../../fila/v1/StreamEnqueueResponse'; export interface FilaServiceClient extends grpc.Client { Ack(argument: _fila_v1_AckRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_AckResponse__Output>): grpc.ClientUnaryCall; @@ -23,15 +23,6 @@ export interface FilaServiceClient extends grpc.Client { ack(argument: _fila_v1_AckRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_AckResponse__Output>): 
grpc.ClientUnaryCall; ack(argument: _fila_v1_AckRequest, callback: grpc.requestCallback<_fila_v1_AckResponse__Output>): grpc.ClientUnaryCall; - BatchEnqueue(argument: _fila_v1_BatchEnqueueRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_BatchEnqueueResponse__Output>): grpc.ClientUnaryCall; - BatchEnqueue(argument: _fila_v1_BatchEnqueueRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_fila_v1_BatchEnqueueResponse__Output>): grpc.ClientUnaryCall; - BatchEnqueue(argument: _fila_v1_BatchEnqueueRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_BatchEnqueueResponse__Output>): grpc.ClientUnaryCall; - BatchEnqueue(argument: _fila_v1_BatchEnqueueRequest, callback: grpc.requestCallback<_fila_v1_BatchEnqueueResponse__Output>): grpc.ClientUnaryCall; - batchEnqueue(argument: _fila_v1_BatchEnqueueRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_BatchEnqueueResponse__Output>): grpc.ClientUnaryCall; - batchEnqueue(argument: _fila_v1_BatchEnqueueRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_fila_v1_BatchEnqueueResponse__Output>): grpc.ClientUnaryCall; - batchEnqueue(argument: _fila_v1_BatchEnqueueRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_BatchEnqueueResponse__Output>): grpc.ClientUnaryCall; - batchEnqueue(argument: _fila_v1_BatchEnqueueRequest, callback: grpc.requestCallback<_fila_v1_BatchEnqueueResponse__Output>): grpc.ClientUnaryCall; - Consume(argument: _fila_v1_ConsumeRequest, metadata: grpc.Metadata, options?: grpc.CallOptions): grpc.ClientReadableStream<_fila_v1_ConsumeResponse__Output>; Consume(argument: _fila_v1_ConsumeRequest, options?: grpc.CallOptions): grpc.ClientReadableStream<_fila_v1_ConsumeResponse__Output>; consume(argument: _fila_v1_ConsumeRequest, metadata: grpc.Metadata, options?: grpc.CallOptions): grpc.ClientReadableStream<_fila_v1_ConsumeResponse__Output>; @@ 
-55,25 +46,30 @@ export interface FilaServiceClient extends grpc.Client { nack(argument: _fila_v1_NackRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_NackResponse__Output>): grpc.ClientUnaryCall; nack(argument: _fila_v1_NackRequest, callback: grpc.requestCallback<_fila_v1_NackResponse__Output>): grpc.ClientUnaryCall; + StreamEnqueue(metadata: grpc.Metadata, options?: grpc.CallOptions): grpc.ClientDuplexStream<_fila_v1_StreamEnqueueRequest, _fila_v1_StreamEnqueueResponse__Output>; + StreamEnqueue(options?: grpc.CallOptions): grpc.ClientDuplexStream<_fila_v1_StreamEnqueueRequest, _fila_v1_StreamEnqueueResponse__Output>; + streamEnqueue(metadata: grpc.Metadata, options?: grpc.CallOptions): grpc.ClientDuplexStream<_fila_v1_StreamEnqueueRequest, _fila_v1_StreamEnqueueResponse__Output>; + streamEnqueue(options?: grpc.CallOptions): grpc.ClientDuplexStream<_fila_v1_StreamEnqueueRequest, _fila_v1_StreamEnqueueResponse__Output>; + } export interface FilaServiceHandlers extends grpc.UntypedServiceImplementation { Ack: grpc.handleUnaryCall<_fila_v1_AckRequest__Output, _fila_v1_AckResponse>; - BatchEnqueue: grpc.handleUnaryCall<_fila_v1_BatchEnqueueRequest__Output, _fila_v1_BatchEnqueueResponse>; - Consume: grpc.handleServerStreamingCall<_fila_v1_ConsumeRequest__Output, _fila_v1_ConsumeResponse>; Enqueue: grpc.handleUnaryCall<_fila_v1_EnqueueRequest__Output, _fila_v1_EnqueueResponse>; Nack: grpc.handleUnaryCall<_fila_v1_NackRequest__Output, _fila_v1_NackResponse>; + StreamEnqueue: grpc.handleBidiStreamingCall<_fila_v1_StreamEnqueueRequest__Output, _fila_v1_StreamEnqueueResponse>; + } export interface FilaServiceDefinition extends grpc.ServiceDefinition { Ack: MethodDefinition<_fila_v1_AckRequest, _fila_v1_AckResponse, _fila_v1_AckRequest__Output, _fila_v1_AckResponse__Output> - BatchEnqueue: MethodDefinition<_fila_v1_BatchEnqueueRequest, _fila_v1_BatchEnqueueResponse, _fila_v1_BatchEnqueueRequest__Output, _fila_v1_BatchEnqueueResponse__Output> 
Consume: MethodDefinition<_fila_v1_ConsumeRequest, _fila_v1_ConsumeResponse, _fila_v1_ConsumeRequest__Output, _fila_v1_ConsumeResponse__Output> Enqueue: MethodDefinition<_fila_v1_EnqueueRequest, _fila_v1_EnqueueResponse, _fila_v1_EnqueueRequest__Output, _fila_v1_EnqueueResponse__Output> Nack: MethodDefinition<_fila_v1_NackRequest, _fila_v1_NackResponse, _fila_v1_NackRequest__Output, _fila_v1_NackResponse__Output> + StreamEnqueue: MethodDefinition<_fila_v1_StreamEnqueueRequest, _fila_v1_StreamEnqueueResponse, _fila_v1_StreamEnqueueRequest__Output, _fila_v1_StreamEnqueueResponse__Output> } diff --git a/generated/fila/v1/NackError.ts b/generated/fila/v1/NackError.ts new file mode 100644 index 0000000..2cc2888 --- /dev/null +++ b/generated/fila/v1/NackError.ts @@ -0,0 +1,13 @@ +// Original file: proto/fila/v1/service.proto + +import type { NackErrorCode as _fila_v1_NackErrorCode, NackErrorCode__Output as _fila_v1_NackErrorCode__Output } from '../../fila/v1/NackErrorCode'; + +export interface NackError { + 'code'?: (_fila_v1_NackErrorCode); + 'message'?: (string); +} + +export interface NackError__Output { + 'code': (_fila_v1_NackErrorCode__Output); + 'message': (string); +} diff --git a/generated/fila/v1/NackErrorCode.ts b/generated/fila/v1/NackErrorCode.ts new file mode 100644 index 0000000..e7738f7 --- /dev/null +++ b/generated/fila/v1/NackErrorCode.ts @@ -0,0 +1,20 @@ +// Original file: proto/fila/v1/service.proto + +export const NackErrorCode = { + NACK_ERROR_CODE_UNSPECIFIED: 'NACK_ERROR_CODE_UNSPECIFIED', + NACK_ERROR_CODE_MESSAGE_NOT_FOUND: 'NACK_ERROR_CODE_MESSAGE_NOT_FOUND', + NACK_ERROR_CODE_STORAGE: 'NACK_ERROR_CODE_STORAGE', + NACK_ERROR_CODE_PERMISSION_DENIED: 'NACK_ERROR_CODE_PERMISSION_DENIED', +} as const; + +export type NackErrorCode = + | 'NACK_ERROR_CODE_UNSPECIFIED' + | 0 + | 'NACK_ERROR_CODE_MESSAGE_NOT_FOUND' + | 1 + | 'NACK_ERROR_CODE_STORAGE' + | 2 + | 'NACK_ERROR_CODE_PERMISSION_DENIED' + | 3 + +export type NackErrorCode__Output = typeof 
NackErrorCode[keyof typeof NackErrorCode] diff --git a/generated/fila/v1/NackMessage.ts b/generated/fila/v1/NackMessage.ts new file mode 100644 index 0000000..2ce0501 --- /dev/null +++ b/generated/fila/v1/NackMessage.ts @@ -0,0 +1,14 @@ +// Original file: proto/fila/v1/service.proto + + +export interface NackMessage { + 'queue'?: (string); + 'messageId'?: (string); + 'error'?: (string); +} + +export interface NackMessage__Output { + 'queue': (string); + 'messageId': (string); + 'error': (string); +} diff --git a/generated/fila/v1/NackRequest.ts b/generated/fila/v1/NackRequest.ts index b7b03f4..2f2450d 100644 --- a/generated/fila/v1/NackRequest.ts +++ b/generated/fila/v1/NackRequest.ts @@ -1,14 +1,11 @@ // Original file: proto/fila/v1/service.proto +import type { NackMessage as _fila_v1_NackMessage, NackMessage__Output as _fila_v1_NackMessage__Output } from '../../fila/v1/NackMessage'; export interface NackRequest { - 'queue'?: (string); - 'messageId'?: (string); - 'error'?: (string); + 'messages'?: (_fila_v1_NackMessage)[]; } export interface NackRequest__Output { - 'queue': (string); - 'messageId': (string); - 'error': (string); + 'messages': (_fila_v1_NackMessage__Output)[]; } diff --git a/generated/fila/v1/NackResponse.ts b/generated/fila/v1/NackResponse.ts index dbc6271..fd00fe1 100644 --- a/generated/fila/v1/NackResponse.ts +++ b/generated/fila/v1/NackResponse.ts @@ -1,8 +1,11 @@ // Original file: proto/fila/v1/service.proto +import type { NackResult as _fila_v1_NackResult, NackResult__Output as _fila_v1_NackResult__Output } from '../../fila/v1/NackResult'; export interface NackResponse { + 'results'?: (_fila_v1_NackResult)[]; } export interface NackResponse__Output { + 'results': (_fila_v1_NackResult__Output)[]; } diff --git a/generated/fila/v1/NackResult.ts b/generated/fila/v1/NackResult.ts new file mode 100644 index 0000000..9a205d9 --- /dev/null +++ b/generated/fila/v1/NackResult.ts @@ -0,0 +1,16 @@ +// Original file: proto/fila/v1/service.proto + +import 
type { NackSuccess as _fila_v1_NackSuccess, NackSuccess__Output as _fila_v1_NackSuccess__Output } from '../../fila/v1/NackSuccess'; +import type { NackError as _fila_v1_NackError, NackError__Output as _fila_v1_NackError__Output } from '../../fila/v1/NackError'; + +export interface NackResult { + 'success'?: (_fila_v1_NackSuccess | null); + 'error'?: (_fila_v1_NackError | null); + 'result'?: "success"|"error"; +} + +export interface NackResult__Output { + 'success'?: (_fila_v1_NackSuccess__Output | null); + 'error'?: (_fila_v1_NackError__Output | null); + 'result'?: "success"|"error"; +} diff --git a/generated/fila/v1/NackSuccess.ts b/generated/fila/v1/NackSuccess.ts new file mode 100644 index 0000000..cd61ea8 --- /dev/null +++ b/generated/fila/v1/NackSuccess.ts @@ -0,0 +1,8 @@ +// Original file: proto/fila/v1/service.proto + + +export interface NackSuccess { +} + +export interface NackSuccess__Output { +} diff --git a/generated/fila/v1/StreamEnqueueRequest.ts b/generated/fila/v1/StreamEnqueueRequest.ts new file mode 100644 index 0000000..03d0b32 --- /dev/null +++ b/generated/fila/v1/StreamEnqueueRequest.ts @@ -0,0 +1,14 @@ +// Original file: proto/fila/v1/service.proto + +import type { EnqueueMessage as _fila_v1_EnqueueMessage, EnqueueMessage__Output as _fila_v1_EnqueueMessage__Output } from '../../fila/v1/EnqueueMessage'; +import type { Long } from '@grpc/proto-loader'; + +export interface StreamEnqueueRequest { + 'messages'?: (_fila_v1_EnqueueMessage)[]; + 'sequenceNumber'?: (number | string | Long); +} + +export interface StreamEnqueueRequest__Output { + 'messages': (_fila_v1_EnqueueMessage__Output)[]; + 'sequenceNumber': (string); +} diff --git a/generated/fila/v1/StreamEnqueueResponse.ts b/generated/fila/v1/StreamEnqueueResponse.ts new file mode 100644 index 0000000..56c5586 --- /dev/null +++ b/generated/fila/v1/StreamEnqueueResponse.ts @@ -0,0 +1,14 @@ +// Original file: proto/fila/v1/service.proto + +import type { EnqueueResult as _fila_v1_EnqueueResult, 
EnqueueResult__Output as _fila_v1_EnqueueResult__Output } from '../../fila/v1/EnqueueResult'; +import type { Long } from '@grpc/proto-loader'; + +export interface StreamEnqueueResponse { + 'sequenceNumber'?: (number | string | Long); + 'results'?: (_fila_v1_EnqueueResult)[]; +} + +export interface StreamEnqueueResponse__Output { + 'sequenceNumber': (string); + 'results': (_fila_v1_EnqueueResult__Output)[]; +} diff --git a/generated/service.ts b/generated/service.ts index 726d422..eb61748 100644 --- a/generated/service.ts +++ b/generated/service.ts @@ -1,5 +1,5 @@ import type * as grpc from '@grpc/grpc-js'; -import type { MessageTypeDefinition } from '@grpc/proto-loader'; +import type { EnumTypeDefinition, MessageTypeDefinition } from '@grpc/proto-loader'; import type { FilaServiceClient as _fila_v1_FilaServiceClient, FilaServiceDefinition as _fila_v1_FilaServiceDefinition } from './fila/v1/FilaService'; @@ -10,21 +10,34 @@ type SubtypeConstructor any, Subtype> export interface ProtoGrpcType { fila: { v1: { + AckError: MessageTypeDefinition + AckErrorCode: EnumTypeDefinition + AckMessage: MessageTypeDefinition AckRequest: MessageTypeDefinition AckResponse: MessageTypeDefinition - BatchEnqueueRequest: MessageTypeDefinition - BatchEnqueueResponse: MessageTypeDefinition - BatchEnqueueResult: MessageTypeDefinition + AckResult: MessageTypeDefinition + AckSuccess: MessageTypeDefinition ConsumeRequest: MessageTypeDefinition ConsumeResponse: MessageTypeDefinition + EnqueueError: MessageTypeDefinition + EnqueueErrorCode: EnumTypeDefinition + EnqueueMessage: MessageTypeDefinition EnqueueRequest: MessageTypeDefinition EnqueueResponse: MessageTypeDefinition + EnqueueResult: MessageTypeDefinition FilaService: SubtypeConstructor & { service: _fila_v1_FilaServiceDefinition } Message: MessageTypeDefinition MessageMetadata: MessageTypeDefinition MessageTimestamps: MessageTypeDefinition + NackError: MessageTypeDefinition + NackErrorCode: EnumTypeDefinition + NackMessage: 
MessageTypeDefinition NackRequest: MessageTypeDefinition NackResponse: MessageTypeDefinition + NackResult: MessageTypeDefinition + NackSuccess: MessageTypeDefinition + StreamEnqueueRequest: MessageTypeDefinition + StreamEnqueueResponse: MessageTypeDefinition } } google: { diff --git a/proto/fila/v1/service.proto b/proto/fila/v1/service.proto index fc0f710..7d1db79 100644 --- a/proto/fila/v1/service.proto +++ b/proto/fila/v1/service.proto @@ -6,20 +6,49 @@ import "fila/v1/messages.proto"; // Hot-path RPCs for producers and consumers. service FilaService { rpc Enqueue(EnqueueRequest) returns (EnqueueResponse); - rpc BatchEnqueue(BatchEnqueueRequest) returns (BatchEnqueueResponse); + rpc StreamEnqueue(stream StreamEnqueueRequest) returns (stream StreamEnqueueResponse); rpc Consume(ConsumeRequest) returns (stream ConsumeResponse); rpc Ack(AckRequest) returns (AckResponse); rpc Nack(NackRequest) returns (NackResponse); } -message EnqueueRequest { +// Individual message to enqueue. +message EnqueueMessage { string queue = 1; map headers = 2; bytes payload = 3; } +// Enqueue one or more messages. +message EnqueueRequest { + repeated EnqueueMessage messages = 1; +} + +// Per-message enqueue result. +message EnqueueResult { + oneof result { + string message_id = 1; + EnqueueError error = 2; + } +} + +// Typed enqueue error with structured error code. +message EnqueueError { + EnqueueErrorCode code = 1; + string message = 2; +} + +enum EnqueueErrorCode { + ENQUEUE_ERROR_CODE_UNSPECIFIED = 0; + ENQUEUE_ERROR_CODE_QUEUE_NOT_FOUND = 1; + ENQUEUE_ERROR_CODE_STORAGE = 2; + ENQUEUE_ERROR_CODE_LUA = 3; + ENQUEUE_ERROR_CODE_PERMISSION_DENIED = 4; +} + +// One result per input message. 
message EnqueueResponse { - string message_id = 1; + repeated EnqueueResult results = 1; } message ConsumeRequest { @@ -27,36 +56,87 @@ message ConsumeRequest { } message ConsumeResponse { - Message message = 1; // Single message (backward compatible, used when batch size is 1) - repeated Message messages = 2; // Batched messages (populated when server sends multiple at once) + repeated Message messages = 1; } -message AckRequest { +// Individual ack item. +message AckMessage { string queue = 1; string message_id = 2; } -message AckResponse {} +message AckRequest { + repeated AckMessage messages = 1; +} + +message AckResult { + oneof result { + AckSuccess success = 1; + AckError error = 2; + } +} -message NackRequest { +message AckSuccess {} + +message AckError { + AckErrorCode code = 1; + string message = 2; +} + +enum AckErrorCode { + ACK_ERROR_CODE_UNSPECIFIED = 0; + ACK_ERROR_CODE_MESSAGE_NOT_FOUND = 1; + ACK_ERROR_CODE_STORAGE = 2; + ACK_ERROR_CODE_PERMISSION_DENIED = 3; +} + +message AckResponse { + repeated AckResult results = 1; +} + +// Individual nack item. +message NackMessage { string queue = 1; string message_id = 2; string error = 3; } -message NackResponse {} +message NackRequest { + repeated NackMessage messages = 1; +} + +message NackResult { + oneof result { + NackSuccess success = 1; + NackError error = 2; + } +} -message BatchEnqueueRequest { - repeated EnqueueRequest messages = 1; +message NackSuccess {} + +message NackError { + NackErrorCode code = 1; + string message = 2; } -message BatchEnqueueResponse { - repeated BatchEnqueueResult results = 1; +enum NackErrorCode { + NACK_ERROR_CODE_UNSPECIFIED = 0; + NACK_ERROR_CODE_MESSAGE_NOT_FOUND = 1; + NACK_ERROR_CODE_STORAGE = 2; + NACK_ERROR_CODE_PERMISSION_DENIED = 3; } -message BatchEnqueueResult { - oneof result { - EnqueueResponse success = 1; - string error = 2; - } +message NackResponse { + repeated NackResult results = 1; +} + +// Stream enqueue — per-write batch with sequence tracking. 
+message StreamEnqueueRequest { + repeated EnqueueMessage messages = 1; + uint64 sequence_number = 2; +} + +message StreamEnqueueResponse { + uint64 sequence_number = 1; + repeated EnqueueResult results = 2; } diff --git a/src/batcher.ts b/src/batcher.ts index c9b3586..214478d 100644 --- a/src/batcher.ts +++ b/src/batcher.ts @@ -4,7 +4,6 @@ import { QueueNotFoundError, RPCError } from "./errors"; import type { EnqueueMessage } from "./types"; import type { FilaServiceClient } from "../generated/fila/v1/FilaService"; import type { EnqueueResponse__Output } from "../generated/fila/v1/EnqueueResponse"; -import type { BatchEnqueueResponse__Output } from "../generated/fila/v1/BatchEnqueueResponse"; /** Controls how the SDK batches enqueue() calls. */ export type BatchMode = @@ -19,7 +18,18 @@ interface BatchItem { reject: (err: Error) => void; } -function mapEnqueueError(err: grpc.ServiceError): Error { +/** + * Map a per-message EnqueueResult error to an SDK error. + * The unified proto uses typed EnqueueError with an error code. + */ +function mapResultError(code: string, message: string): Error { + if (code === "ENQUEUE_ERROR_CODE_QUEUE_NOT_FOUND") { + return new QueueNotFoundError(`enqueue: ${message}`); + } + return new RPCError(grpc.status.INTERNAL, message); +} + +function mapTransportError(err: grpc.ServiceError): Error { if (err.code === grpc.status.NOT_FOUND) { return new QueueNotFoundError(`enqueue: ${err.details}`); } @@ -28,7 +38,8 @@ function mapEnqueueError(err: grpc.ServiceError): Error { /** * Background batcher that collects enqueue() calls and flushes them - * as batch RPCs. Supports auto (opportunistic) and linger (timer-based) modes. + * via the unified Enqueue RPC (which accepts repeated messages). + * Supports auto (opportunistic) and linger (timer-based) modes. 
*/ export class Batcher { private readonly grpcClient: FilaServiceClient; @@ -41,6 +52,7 @@ export class Batcher { private closed = false; private drainResolvers: Array<() => void> = []; private lingerTimer: ReturnType | null = null; + private inFlightCount = 0; constructor( grpcClient: FilaServiceClient, @@ -155,17 +167,18 @@ export class Batcher { private flushAll(): void { while (this.pending.length > 0) { const items = this.pending.splice(0, this.maxBatchSize); - // Fire-and-forget: flush concurrently. + this.inFlightCount++; this.flushBatch(items).then(() => { + this.inFlightCount--; this.notifyDrainComplete(); }); } - // Also check drain in case pending was already empty. + // Also check drain in case pending was already empty and nothing in-flight. this.notifyDrainComplete(); } private notifyDrainComplete(): void { - if (this.pending.length === 0 && this.drainResolvers.length > 0) { + if (this.pending.length === 0 && this.inFlightCount === 0 && this.drainResolvers.length > 0) { const resolvers = this.drainResolvers.splice(0); for (const resolve of resolvers) { resolve(); @@ -174,43 +187,12 @@ export class Batcher { } /** - * Flush a batch of items. Single item uses Enqueue RPC (preserves error - * types like QueueNotFoundError). Multiple items use BatchEnqueue. + * Flush a batch of items via the unified Enqueue RPC (repeated messages). + * All items -- single or multiple -- use the same RPC. */ - private async flushBatch(items: BatchItem[]): Promise { - if (items.length === 0) return; - - if (items.length === 1) { - return this.flushSingle(items[0]); - } + private flushBatch(items: BatchItem[]): Promise { + if (items.length === 0) return Promise.resolve(); - return this.flushMultiple(items); - } - - /** Flush a single item via the regular Enqueue RPC. 
*/ - private flushSingle(item: BatchItem): Promise { - return new Promise((resolve) => { - this.grpcClient.enqueue( - { - queue: item.message.queue, - headers: item.message.headers, - payload: item.message.payload, - }, - this.callMetadata(), - (err: grpc.ServiceError | null, resp?: EnqueueResponse__Output) => { - if (err) { - item.reject(mapEnqueueError(err)); - } else { - item.resolve(resp!.messageId); - } - resolve(); - } - ); - }); - } - - /** Flush multiple items via the BatchEnqueue RPC. */ - private flushMultiple(items: BatchItem[]): Promise { const messages = items.map((item) => ({ queue: item.message.queue, headers: item.message.headers, @@ -218,16 +200,13 @@ export class Batcher { })); return new Promise((resolve) => { - this.grpcClient.batchEnqueue( + this.grpcClient.enqueue( { messages }, this.callMetadata(), - ( - err: grpc.ServiceError | null, - resp?: BatchEnqueueResponse__Output - ) => { + (err: grpc.ServiceError | null, resp?: EnqueueResponse__Output) => { if (err) { // Transport-level failure: all items get the error. 
- const mapped = new RPCError(err.code, err.details); + const mapped = mapTransportError(err); for (const item of items) { item.reject(mapped); } @@ -244,11 +223,11 @@ export class Batcher { ); continue; } - if (result.result === "success" && result.success) { - items[i].resolve(result.success.messageId!); + if (result.result === "messageId" && result.messageId) { + items[i].resolve(result.messageId); } else if (result.result === "error" && result.error) { items[i].reject( - new RPCError(grpc.status.INTERNAL, result.error) + mapResultError(result.error.code, result.error.message) ); } else { items[i].reject( @@ -262,5 +241,4 @@ export class Batcher { ); }); } - } diff --git a/src/client.ts b/src/client.ts index ad4b4a6..2cc4a75 100644 --- a/src/client.ts +++ b/src/client.ts @@ -9,9 +9,11 @@ import { QueueNotFoundError, RPCError, } from "./errors"; -import type { ConsumeMessage, EnqueueMessage, BatchEnqueueResult } from "./types"; +import type { ConsumeMessage, EnqueueMessage, EnqueueResult } from "./types"; import type { FilaServiceClient } from "../generated/fila/v1/FilaService"; import type { EnqueueResponse__Output } from "../generated/fila/v1/EnqueueResponse"; +import type { AckResponse__Output } from "../generated/fila/v1/AckResponse"; +import type { NackResponse__Output } from "../generated/fila/v1/NackResponse"; import type { ConsumeResponse__Output } from "../generated/fila/v1/ConsumeResponse"; import { Batcher, type BatchMode } from "./batcher"; @@ -85,52 +87,49 @@ function mapConsumeError(err: grpc.ServiceError): FilaError { return new RPCError(err.code, err.details); } -function mapAckError(err: grpc.ServiceError): FilaError { - if (err.code === grpc.status.NOT_FOUND) { - return new MessageNotFoundError(`ack: ${err.details}`); +/** + * Map a per-message EnqueueResult error code to an SDK error type. 
+ */ +function mapEnqueueResultError(code: string, message: string): FilaError { + if (code === "ENQUEUE_ERROR_CODE_QUEUE_NOT_FOUND") { + return new QueueNotFoundError(`enqueue: ${message}`); } - return new RPCError(err.code, err.details); + return new RPCError(grpc.status.INTERNAL, message); } -function mapNackError(err: grpc.ServiceError): FilaError { - if (err.code === grpc.status.NOT_FOUND) { - return new MessageNotFoundError(`nack: ${err.details}`); +/** + * Map a per-message AckResult error code to an SDK error type. + */ +function mapAckResultError(code: string, message: string): FilaError { + if (code === "ACK_ERROR_CODE_MESSAGE_NOT_FOUND") { + return new MessageNotFoundError(`ack: ${message}`); } - return new RPCError(err.code, err.details); + return new RPCError(grpc.status.INTERNAL, message); +} + +/** + * Map a per-message NackResult error code to an SDK error type. + */ +function mapNackResultError(code: string, message: string): FilaError { + if (code === "NACK_ERROR_CODE_MESSAGE_NOT_FOUND") { + return new MessageNotFoundError(`nack: ${message}`); + } + return new RPCError(grpc.status.INTERNAL, message); } /** Map a ConsumeResponse to ConsumeMessage(s), skipping keepalive frames. */ function mapConsumeResponse( resp: ConsumeResponse__Output ): ConsumeMessage[] { - // Prefer the batched `messages` field when non-empty. - if (resp.messages && resp.messages.length > 0) { - const results: ConsumeMessage[] = []; - for (const msg of resp.messages) { - if (!msg || !msg.id) continue; - const metadata = msg.metadata; - results.push({ - id: msg.id, - headers: msg.headers ?? {}, - payload: Buffer.isBuffer(msg.payload) - ? msg.payload - : Buffer.from(msg.payload ?? ""), - fairnessKey: metadata?.fairnessKey ?? "", - attemptCount: metadata?.attemptCount ?? 0, - queue: metadata?.queueId ?? "", - }); - } - return results; - } - - // Fall back to singular `message` field (backward compatible). 
- const msg = resp.message; - if (!msg || !msg.id) { + if (!resp.messages || resp.messages.length === 0) { return []; // keepalive frame } - const metadata = msg.metadata; - return [ - { + + const results: ConsumeMessage[] = []; + for (const msg of resp.messages) { + if (!msg || !msg.id) continue; + const metadata = msg.metadata; + results.push({ id: msg.id, headers: msg.headers ?? {}, payload: Buffer.isBuffer(msg.payload) @@ -139,8 +138,9 @@ function mapConsumeResponse( fairnessKey: metadata?.fairnessKey ?? "", attemptCount: metadata?.attemptCount ?? 0, queue: metadata?.queueId ?? "", - }, - ]; + }); + } + return results; } /** Connection options for TLS, authentication, and batching. */ @@ -292,7 +292,7 @@ export class Client { * * When batching is enabled (default), the message is routed through the * batcher. At low load, messages are sent individually. At high load, - * messages cluster naturally into BatchEnqueue RPCs. + * messages cluster naturally into larger Enqueue RPCs. * * @param queue - Target queue name. * @param headers - Optional message headers. @@ -315,16 +315,27 @@ export class Client { }); } - // No batching: direct RPC. + // No batching: direct RPC with single message in the repeated field. return new Promise((resolve, reject) => { this.grpcClient.enqueue( - { queue, headers: headers ?? {}, payload }, + { messages: [{ queue, headers: headers ?? 
{}, payload }] }, this.callMetadata(), (err: grpc.ServiceError | null, resp?: EnqueueResponse__Output) => { if (err) { reject(mapEnqueueError(err)); + return; + } + const result = resp!.results[0]; + if (!result) { + reject(new RPCError(grpc.status.INTERNAL, "no result from server")); + return; + } + if (result.result === "messageId" && result.messageId) { + resolve(result.messageId); + } else if (result.result === "error" && result.error) { + reject(mapEnqueueResultError(result.error.code, result.error.message)); } else { - resolve(resp!.messageId); + reject(new RPCError(grpc.status.INTERNAL, "no result from server")); } } ); @@ -332,60 +343,53 @@ export class Client { } /** - * Enqueue a batch of messages in a single RPC call. + * Enqueue multiple messages in a single RPC call. * * Each message is independently validated and processed. A failed message - * does not affect the others in the batch. Returns one result per input - * message, in the same order. + * does not affect the others. Returns one result per input message, + * in the same order. * - * This is more efficient than calling enqueue() in a loop because it - * amortizes the RPC overhead across all messages. + * This always bypasses the batcher and issues a direct Enqueue RPC. * * @param messages - Array of messages to enqueue. * @returns Per-message results (success with messageId, or error with description). - * @throws {RPCError} For transport-level failures affecting the entire batch. + * @throws {RPCError} For transport-level failures affecting the entire call. */ - batchEnqueue(messages: EnqueueMessage[]): Promise { - // batchEnqueue always bypasses the batcher and uses a direct RPC. - // Create a temporary batcher-like object to reuse the RPC logic, - // or just call the gRPC client directly. 
- return this.doBatchEnqueue(messages); - } - - private doBatchEnqueue(messages: EnqueueMessage[]): Promise { + enqueueMany(messages: EnqueueMessage[]): Promise { const protoMessages = messages.map((m) => ({ queue: m.queue, headers: m.headers, payload: m.payload, })); - return new Promise((resolve, reject) => { - this.grpcClient.batchEnqueue( + return new Promise((resolve, reject) => { + this.grpcClient.enqueue( { messages: protoMessages }, this.callMetadata(), - (err: grpc.ServiceError | null, resp?) => { + (err: grpc.ServiceError | null, resp?: EnqueueResponse__Output) => { if (err) { reject(new RPCError(err.code, err.details)); return; } - const results: BatchEnqueueResult[] = resp!.results.map( - (r: { result?: string; success?: { messageId?: string } | null; error?: string }) => { - if (r.result === "success" && r.success) { - return { - success: true as const, - messageId: r.success.messageId!, - }; - } else if (r.result === "error" && r.error) { - return { success: false as const, error: r.error }; - } else { - return { - success: false as const, - error: "no result from server", - }; - } + const results: EnqueueResult[] = resp!.results.map((r) => { + if (r.result === "messageId" && r.messageId) { + return { + success: true as const, + messageId: r.messageId, + }; + } else if (r.result === "error" && r.error) { + return { + success: false as const, + error: r.error.message, + }; + } else { + return { + success: false as const, + error: "no result from server", + }; } - ); + }); resolve(results); } @@ -397,9 +401,9 @@ export class Client { * Open a streaming consumer on the specified queue. * * Returns an async iterable that yields messages as they become available. - * Nil message frames (keepalive signals) are skipped automatically. - * Batched delivery frames (multiple messages per ConsumeResponse) are - * transparently unpacked into individual messages. + * Empty response frames (keepalive signals) are skipped automatically. 
+ * Delivery frames containing multiple messages are transparently unpacked + * into individual messages. * * If the server returns UNAVAILABLE with an `x-fila-leader-addr` metadata * header, the client transparently reconnects to the leader node and retries @@ -489,13 +493,24 @@ export class Client { ack(queue: string, msgId: string): Promise { return new Promise((resolve, reject) => { this.grpcClient.ack( - { queue, messageId: msgId }, + { messages: [{ queue, messageId: msgId }] }, this.callMetadata(), - (err: grpc.ServiceError | null) => { + (err: grpc.ServiceError | null, resp?: AckResponse__Output) => { if (err) { - reject(mapAckError(err)); - } else { + reject(new RPCError(err.code, err.details)); + return; + } + const result = resp!.results[0]; + if (!result) { + reject(new RPCError(grpc.status.INTERNAL, "no result from server")); + return; + } + if (result.result === "success") { resolve(); + } else if (result.result === "error" && result.error) { + reject(mapAckResultError(result.error.code, result.error.message)); + } else { + reject(new RPCError(grpc.status.INTERNAL, "no result from server")); } } ); @@ -513,13 +528,24 @@ export class Client { nack(queue: string, msgId: string, error: string): Promise { return new Promise((resolve, reject) => { this.grpcClient.nack( - { queue, messageId: msgId, error }, + { messages: [{ queue, messageId: msgId, error }] }, this.callMetadata(), - (err: grpc.ServiceError | null) => { + (err: grpc.ServiceError | null, resp?: NackResponse__Output) => { if (err) { - reject(mapNackError(err)); - } else { + reject(new RPCError(err.code, err.details)); + return; + } + const result = resp!.results[0]; + if (!result) { + reject(new RPCError(grpc.status.INTERNAL, "no result from server")); + return; + } + if (result.result === "success") { resolve(); + } else if (result.result === "error" && result.error) { + reject(mapNackResultError(result.error.code, result.error.message)); + } else { + reject(new RPCError(grpc.status.INTERNAL, 
"no result from server")); } } ); diff --git a/src/index.ts b/src/index.ts index e7a3302..9f479a0 100644 --- a/src/index.ts +++ b/src/index.ts @@ -1,6 +1,6 @@ export { Client } from "./client"; export type { ClientOptions } from "./client"; -export type { ConsumeMessage, EnqueueMessage, BatchEnqueueResult } from "./types"; +export type { ConsumeMessage, EnqueueMessage, EnqueueResult } from "./types"; export { FilaError, QueueNotFoundError, diff --git a/src/types.ts b/src/types.ts index cb9fd4b..99706b8 100644 --- a/src/types.ts +++ b/src/types.ts @@ -24,7 +24,7 @@ export interface EnqueueMessage { payload: Buffer; } -/** The result of a single message within a batch enqueue call. */ -export type BatchEnqueueResult = +/** The result of a single message within an enqueue call. */ +export type EnqueueResult = | { success: true; messageId: string } | { success: false; error: string }; diff --git a/test/batch.test.ts b/test/batch.test.ts index ce47540..40ea0e2 100644 --- a/test/batch.test.ts +++ b/test/batch.test.ts @@ -7,7 +7,7 @@ import { type TestServer, } from "./helpers"; -describe.skipIf(!FILA_SERVER_AVAILABLE)("Batch operations", () => { +describe.skipIf(!FILA_SERVER_AVAILABLE)("Enqueue operations", () => { let server: TestServer; beforeAll(async () => { @@ -18,16 +18,16 @@ describe.skipIf(!FILA_SERVER_AVAILABLE)("Batch operations", () => { server?.stop(); }); - describe("batchEnqueue", () => { + describe("enqueueMany", () => { it("enqueues multiple messages in a single RPC", async () => { - await server.createQueue("batch-multi"); + await server.createQueue("multi-enqueue"); const client = new Client(server.addr, { batchMode: "disabled" }); try { - const results = await client.batchEnqueue([ - { queue: "batch-multi", headers: { idx: "0" }, payload: Buffer.from("msg-0") }, - { queue: "batch-multi", headers: { idx: "1" }, payload: Buffer.from("msg-1") }, - { queue: "batch-multi", headers: { idx: "2" }, payload: Buffer.from("msg-2") }, + const results = await 
client.enqueueMany([ + { queue: "multi-enqueue", headers: { idx: "0" }, payload: Buffer.from("msg-0") }, + { queue: "multi-enqueue", headers: { idx: "1" }, payload: Buffer.from("msg-1") }, + { queue: "multi-enqueue", headers: { idx: "2" }, payload: Buffer.from("msg-2") }, ]); expect(results).toHaveLength(3); @@ -41,9 +41,9 @@ describe.skipIf(!FILA_SERVER_AVAILABLE)("Batch operations", () => { // Verify all messages are consumable. const received: string[] = []; let count = 0; - for await (const msg of client.consume("batch-multi")) { + for await (const msg of client.consume("multi-enqueue")) { received.push(msg.payload.toString()); - await client.ack("batch-multi", msg.id); + await client.ack("multi-enqueue", msg.id); count++; if (count >= 3) break; } @@ -56,12 +56,12 @@ describe.skipIf(!FILA_SERVER_AVAILABLE)("Batch operations", () => { }); it("returns per-message errors for nonexistent queues", async () => { - await server.createQueue("batch-partial"); + await server.createQueue("multi-partial"); const client = new Client(server.addr, { batchMode: "disabled" }); try { - const results = await client.batchEnqueue([ - { queue: "batch-partial", headers: {}, payload: Buffer.from("ok") }, + const results = await client.enqueueMany([ + { queue: "multi-partial", headers: {}, payload: Buffer.from("ok") }, { queue: "no-such-queue", headers: {}, payload: Buffer.from("fail") }, ]); @@ -77,13 +77,13 @@ describe.skipIf(!FILA_SERVER_AVAILABLE)("Batch operations", () => { }); it("returns message IDs in same order as input", async () => { - await server.createQueue("batch-order"); + await server.createQueue("multi-order"); const client = new Client(server.addr, { batchMode: "disabled" }); try { - const results = await client.batchEnqueue([ - { queue: "batch-order", headers: {}, payload: Buffer.from("first") }, - { queue: "batch-order", headers: {}, payload: Buffer.from("second") }, + const results = await client.enqueueMany([ + { queue: "multi-order", headers: {}, payload: 
Buffer.from("first") }, + { queue: "multi-order", headers: {}, payload: Buffer.from("second") }, ]); expect(results).toHaveLength(2); @@ -157,7 +157,7 @@ describe.skipIf(!FILA_SERVER_AVAILABLE)("Batch operations", () => { const client = new Client(server.addr); try { // Single message to nonexistent queue: should get QueueNotFoundError - // because single-item batches use Enqueue RPC. + // because the per-result error code is mapped to QueueNotFoundError. await expect( client.enqueue("no-such-queue-auto", null, Buffer.from("fail")) ).rejects.toThrow(QueueNotFoundError); From d26a49cc84fee2255fd064da1989be0b37a7e2ca Mon Sep 17 00:00:00 2001 From: Lucas Vieira Date: Sat, 4 Apr 2026 10:02:25 -0300 Subject: [PATCH 7/8] feat: migrate from grpc to fila binary protocol (fibp) replace grpc transport with native fibp binary protocol over tcp. add fibp codec (encoder/decoder/frame reader), connection manager with tls/handshake/multiplexing, and rewrite all client operations. add admin ops (createQueue, deleteQueue, getStats, listQueues, setConfig, getConfig, listConfig, redrive) and auth ops (createApiKey, revokeApiKey, listApiKeys, setAcl, getAcl). remove @grpc/grpc-js, @grpc/proto-loader, proto files, and generated code. all 47 tests pass. 
--- README.md | 82 +- generated/admin.ts | 50 - generated/fila/v1/AckError.ts | 13 - generated/fila/v1/AckErrorCode.ts | 20 - generated/fila/v1/AckMessage.ts | 12 - generated/fila/v1/AckRequest.ts | 11 - generated/fila/v1/AckResponse.ts | 11 - generated/fila/v1/AckResult.ts | 16 - generated/fila/v1/AckSuccess.ts | 8 - generated/fila/v1/AclPermission.ts | 12 - generated/fila/v1/ApiKeyInfo.ts | 19 - generated/fila/v1/ConfigEntry.ts | 12 - generated/fila/v1/ConsumeRequest.ts | 10 - generated/fila/v1/ConsumeResponse.ts | 11 - generated/fila/v1/CreateApiKeyRequest.ts | 15 - generated/fila/v1/CreateApiKeyResponse.ts | 14 - generated/fila/v1/CreateQueueRequest.ts | 13 - generated/fila/v1/CreateQueueResponse.ts | 10 - generated/fila/v1/DeleteQueueRequest.ts | 10 - generated/fila/v1/DeleteQueueResponse.ts | 8 - generated/fila/v1/EnqueueError.ts | 13 - generated/fila/v1/EnqueueErrorCode.ts | 23 - generated/fila/v1/EnqueueMessage.ts | 14 - generated/fila/v1/EnqueueRequest.ts | 11 - generated/fila/v1/EnqueueResponse.ts | 11 - generated/fila/v1/EnqueueResult.ts | 15 - generated/fila/v1/FilaAdmin.ts | 195 ---- generated/fila/v1/FilaService.ts | 75 -- generated/fila/v1/GetAclRequest.ts | 10 - generated/fila/v1/GetAclResponse.ts | 15 - generated/fila/v1/GetConfigRequest.ts | 10 - generated/fila/v1/GetConfigResponse.ts | 10 - generated/fila/v1/GetStatsRequest.ts | 10 - generated/fila/v1/GetStatsResponse.ts | 29 - generated/fila/v1/ListApiKeysRequest.ts | 8 - generated/fila/v1/ListApiKeysResponse.ts | 11 - generated/fila/v1/ListConfigRequest.ts | 10 - generated/fila/v1/ListConfigResponse.ts | 13 - generated/fila/v1/ListQueuesRequest.ts | 8 - generated/fila/v1/ListQueuesResponse.ts | 13 - generated/fila/v1/Message.ts | 20 - generated/fila/v1/MessageMetadata.ts | 18 - generated/fila/v1/MessageTimestamps.ts | 13 - generated/fila/v1/NackError.ts | 13 - generated/fila/v1/NackErrorCode.ts | 20 - generated/fila/v1/NackMessage.ts | 14 - generated/fila/v1/NackRequest.ts | 11 - 
generated/fila/v1/NackResponse.ts | 11 - generated/fila/v1/NackResult.ts | 16 - generated/fila/v1/NackSuccess.ts | 8 - generated/fila/v1/PerFairnessKeyStats.ts | 17 - generated/fila/v1/PerThrottleKeyStats.ts | 16 - generated/fila/v1/QueueConfig.ts | 15 - generated/fila/v1/QueueInfo.ts | 19 - generated/fila/v1/RedriveRequest.ts | 13 - generated/fila/v1/RedriveResponse.ts | 11 - generated/fila/v1/RevokeApiKeyRequest.ts | 10 - generated/fila/v1/RevokeApiKeyResponse.ts | 8 - generated/fila/v1/SetAclRequest.ts | 13 - generated/fila/v1/SetAclResponse.ts | 8 - generated/fila/v1/SetConfigRequest.ts | 12 - generated/fila/v1/SetConfigResponse.ts | 8 - generated/fila/v1/StreamEnqueueRequest.ts | 14 - generated/fila/v1/StreamEnqueueResponse.ts | 14 - generated/google/protobuf/Timestamp.ts | 13 - generated/messages.ts | 23 - generated/service.ts | 49 - package-lock.json | 317 +----- package.json | 13 +- proto/fila/v1/admin.proto | 197 ---- proto/fila/v1/messages.proto | 28 - proto/fila/v1/service.proto | 142 --- src/batcher.ts | 181 ++-- src/client.ts | 1132 +++++++++++++------- src/connection.ts | 365 +++++++ src/errors.ts | 139 ++- src/fibp/codec.ts | 347 ++++++ src/fibp/constants.ts | 109 ++ src/fibp/index.ts | 3 + src/index.ts | 18 +- src/types.ts | 55 + test/auth.test.ts | 59 +- test/batch.test.ts | 28 +- test/batcher.unit.test.ts | 2 - test/client.test.ts | 5 +- test/codec.test.ts | 176 +++ test/helpers.ts | 121 +-- tsconfig.json | 3 +- vitest.config.ts | 1 + 89 files changed, 2134 insertions(+), 2575 deletions(-) delete mode 100644 generated/admin.ts delete mode 100644 generated/fila/v1/AckError.ts delete mode 100644 generated/fila/v1/AckErrorCode.ts delete mode 100644 generated/fila/v1/AckMessage.ts delete mode 100644 generated/fila/v1/AckRequest.ts delete mode 100644 generated/fila/v1/AckResponse.ts delete mode 100644 generated/fila/v1/AckResult.ts delete mode 100644 generated/fila/v1/AckSuccess.ts delete mode 100644 generated/fila/v1/AclPermission.ts delete mode 
100644 generated/fila/v1/ApiKeyInfo.ts delete mode 100644 generated/fila/v1/ConfigEntry.ts delete mode 100644 generated/fila/v1/ConsumeRequest.ts delete mode 100644 generated/fila/v1/ConsumeResponse.ts delete mode 100644 generated/fila/v1/CreateApiKeyRequest.ts delete mode 100644 generated/fila/v1/CreateApiKeyResponse.ts delete mode 100644 generated/fila/v1/CreateQueueRequest.ts delete mode 100644 generated/fila/v1/CreateQueueResponse.ts delete mode 100644 generated/fila/v1/DeleteQueueRequest.ts delete mode 100644 generated/fila/v1/DeleteQueueResponse.ts delete mode 100644 generated/fila/v1/EnqueueError.ts delete mode 100644 generated/fila/v1/EnqueueErrorCode.ts delete mode 100644 generated/fila/v1/EnqueueMessage.ts delete mode 100644 generated/fila/v1/EnqueueRequest.ts delete mode 100644 generated/fila/v1/EnqueueResponse.ts delete mode 100644 generated/fila/v1/EnqueueResult.ts delete mode 100644 generated/fila/v1/FilaAdmin.ts delete mode 100644 generated/fila/v1/FilaService.ts delete mode 100644 generated/fila/v1/GetAclRequest.ts delete mode 100644 generated/fila/v1/GetAclResponse.ts delete mode 100644 generated/fila/v1/GetConfigRequest.ts delete mode 100644 generated/fila/v1/GetConfigResponse.ts delete mode 100644 generated/fila/v1/GetStatsRequest.ts delete mode 100644 generated/fila/v1/GetStatsResponse.ts delete mode 100644 generated/fila/v1/ListApiKeysRequest.ts delete mode 100644 generated/fila/v1/ListApiKeysResponse.ts delete mode 100644 generated/fila/v1/ListConfigRequest.ts delete mode 100644 generated/fila/v1/ListConfigResponse.ts delete mode 100644 generated/fila/v1/ListQueuesRequest.ts delete mode 100644 generated/fila/v1/ListQueuesResponse.ts delete mode 100644 generated/fila/v1/Message.ts delete mode 100644 generated/fila/v1/MessageMetadata.ts delete mode 100644 generated/fila/v1/MessageTimestamps.ts delete mode 100644 generated/fila/v1/NackError.ts delete mode 100644 generated/fila/v1/NackErrorCode.ts delete mode 100644 
generated/fila/v1/NackMessage.ts delete mode 100644 generated/fila/v1/NackRequest.ts delete mode 100644 generated/fila/v1/NackResponse.ts delete mode 100644 generated/fila/v1/NackResult.ts delete mode 100644 generated/fila/v1/NackSuccess.ts delete mode 100644 generated/fila/v1/PerFairnessKeyStats.ts delete mode 100644 generated/fila/v1/PerThrottleKeyStats.ts delete mode 100644 generated/fila/v1/QueueConfig.ts delete mode 100644 generated/fila/v1/QueueInfo.ts delete mode 100644 generated/fila/v1/RedriveRequest.ts delete mode 100644 generated/fila/v1/RedriveResponse.ts delete mode 100644 generated/fila/v1/RevokeApiKeyRequest.ts delete mode 100644 generated/fila/v1/RevokeApiKeyResponse.ts delete mode 100644 generated/fila/v1/SetAclRequest.ts delete mode 100644 generated/fila/v1/SetAclResponse.ts delete mode 100644 generated/fila/v1/SetConfigRequest.ts delete mode 100644 generated/fila/v1/SetConfigResponse.ts delete mode 100644 generated/fila/v1/StreamEnqueueRequest.ts delete mode 100644 generated/fila/v1/StreamEnqueueResponse.ts delete mode 100644 generated/google/protobuf/Timestamp.ts delete mode 100644 generated/messages.ts delete mode 100644 generated/service.ts delete mode 100644 proto/fila/v1/admin.proto delete mode 100644 proto/fila/v1/messages.proto delete mode 100644 proto/fila/v1/service.proto create mode 100644 src/connection.ts create mode 100644 src/fibp/codec.ts create mode 100644 src/fibp/constants.ts create mode 100644 src/fibp/index.ts create mode 100644 test/codec.test.ts diff --git a/README.md b/README.md index f4d952e..f81fdde 100644 --- a/README.md +++ b/README.md @@ -2,6 +2,8 @@ JavaScript/TypeScript client SDK for the [Fila](https://github.com/faisca/fila) message broker. +Uses the Fila binary protocol (FIBP) over TCP for all communication. + ## Installation ```bash @@ -14,6 +16,7 @@ npm install fila-client import { Client } from "fila-client"; const client = new Client("localhost:5555"); +await client.connect(); // Enqueue a message. 
const msgId = await client.enqueue( @@ -35,17 +38,16 @@ for await (const msg of client.consume("my-queue")) { } } -client.close(); +await client.close(); ``` ### TLS (system trust store) -If the Fila server uses a certificate signed by a public CA, enable TLS without providing a CA certificate — the OS system trust store is used automatically: +If the Fila server uses a certificate signed by a public CA, enable TLS without providing a CA certificate -- the OS system trust store is used automatically: ```typescript -import { Client } from "@fila/client"; - const client = new Client("localhost:5555", { tls: true }); +await client.connect(); ``` ### TLS (custom CA certificate) @@ -59,46 +61,39 @@ import { Client } from "fila-client"; const client = new Client("localhost:5555", { caCert: fs.readFileSync("ca.pem"), }); +await client.connect(); ``` ### Mutual TLS (mTLS) -Client certificates work with both modes — system trust store or custom CA: +Client certificates work with both modes -- system trust store or custom CA: ```typescript import * as fs from "fs"; -import { Client } from "@fila/client"; +import { Client } from "fila-client"; -// With custom CA: const client = new Client("localhost:5555", { caCert: fs.readFileSync("ca.pem"), clientCert: fs.readFileSync("client.pem"), clientKey: fs.readFileSync("client.key"), }); - -// With system trust store: -const client2 = new Client("localhost:5555", { - tls: true, - clientCert: fs.readFileSync("client.pem"), - clientKey: fs.readFileSync("client.key"), -}); +await client.connect(); ``` ### API key authentication ```typescript -import { Client } from "@fila/client"; - const client = new Client("localhost:5555", { apiKey: "my-api-key", }); +await client.connect(); ``` ### mTLS + API key ```typescript import * as fs from "fs"; -import { Client } from "@fila/client"; +import { Client } from "fila-client"; const client = new Client("localhost:5555", { caCert: fs.readFileSync("ca.pem"), @@ -106,13 +101,14 @@ const client = new 
Client("localhost:5555", { clientKey: fs.readFileSync("client.key"), apiKey: "my-api-key", }); +await client.connect(); ``` ## API ### `new Client(addr: string, options?: ClientOptions)` -Connect to a Fila broker at the given address (e.g., `"localhost:5555"`). +Create a client for the given address (e.g., `"localhost:5555"`). Call `connect()` to establish the connection. **Options:** @@ -122,34 +118,70 @@ Connect to a Fila broker at the given address (e.g., `"localhost:5555"`). | `caCert` | `Buffer` | CA certificate PEM. Enables TLS with a custom CA when set. | | `clientCert`| `Buffer` | Client certificate PEM for mTLS. Requires TLS to be enabled. | | `clientKey` | `Buffer` | Client private key PEM for mTLS. Requires TLS to be enabled. | -| `apiKey` | `string` | API key sent as `Bearer` token on every RPC call. | +| `apiKey` | `string` | API key sent in the FIBP handshake. | +| `batchMode` | `string` | Enqueue batching: `'auto'` (default), `'linger'`, or `'disabled'`. | + +### `client.connect(): Promise` + +Establish the TCP connection (with optional TLS) and perform the FIBP handshake. ### `client.enqueue(queue, headers, payload): Promise` Enqueue a message. Returns the broker-assigned message ID (UUIDv7). +### `client.enqueueMany(messages): Promise` + +Enqueue multiple messages in a single batch request. Returns per-message results. + ### `client.consume(queue): AsyncIterable` -Open a streaming consumer. Returns an async iterable that yields messages as they become available. Nacked messages are redelivered on the same stream. +Open a streaming consumer. Returns an async iterable that yields messages as they become available. ### `client.ack(queue, msgId): Promise` -Acknowledge a successfully processed message. The message is permanently removed. +Acknowledge a successfully processed message. ### `client.nack(queue, msgId, error): Promise` -Negatively acknowledge a failed message. 
The message is requeued or routed to the dead-letter queue based on the queue's configuration. +Negatively acknowledge a failed message. + +### `client.close(): Promise` + +Close the client, draining any pending batched messages first. + +### Admin operations + +- `client.createQueue(name, opts?): Promise` -- Create a queue. +- `client.deleteQueue(queue): Promise` -- Delete a queue. +- `client.getStats(queue): Promise` -- Get queue statistics. +- `client.listQueues(): Promise<{ clusterNodeCount, queues }>` -- List all queues. +- `client.setConfig(key, value): Promise` -- Set a runtime config key. +- `client.getConfig(key): Promise` -- Get a runtime config value. +- `client.listConfig(prefix): Promise>` -- List config entries. +- `client.redrive(dlqQueue, count): Promise` -- Redrive DLQ messages. -### `client.close(): void` +### Auth operations -Close the underlying gRPC channel. +- `client.createApiKey(name, opts?): Promise<{ keyId, key, isSuperadmin }>` -- Create an API key. +- `client.revokeApiKey(keyId): Promise` -- Revoke an API key. +- `client.listApiKeys(): Promise` -- List all API keys. +- `client.setAcl(keyId, permissions): Promise` -- Set ACL permissions. +- `client.getAcl(keyId): Promise<{ keyId, isSuperadmin, permissions }>` -- Get ACL permissions. 
## Error Handling Per-operation error classes are thrown for specific failure modes: ```typescript -import { QueueNotFoundError, MessageNotFoundError } from "fila-client"; +import { + QueueNotFoundError, + MessageNotFoundError, + UnauthorizedError, + ForbiddenError, + NotLeaderError, + QueueAlreadyExistsError, + ProtocolError, +} from "fila-client"; try { await client.enqueue("missing-queue", null, Buffer.from("test")); diff --git a/generated/admin.ts b/generated/admin.ts deleted file mode 100644 index e521396..0000000 --- a/generated/admin.ts +++ /dev/null @@ -1,50 +0,0 @@ -import type * as grpc from '@grpc/grpc-js'; -import type { MessageTypeDefinition } from '@grpc/proto-loader'; - -import type { FilaAdminClient as _fila_v1_FilaAdminClient, FilaAdminDefinition as _fila_v1_FilaAdminDefinition } from './fila/v1/FilaAdmin'; - -type SubtypeConstructor any, Subtype> = { - new(...args: ConstructorParameters): Subtype; -}; - -export interface ProtoGrpcType { - fila: { - v1: { - AclPermission: MessageTypeDefinition - ApiKeyInfo: MessageTypeDefinition - ConfigEntry: MessageTypeDefinition - CreateApiKeyRequest: MessageTypeDefinition - CreateApiKeyResponse: MessageTypeDefinition - CreateQueueRequest: MessageTypeDefinition - CreateQueueResponse: MessageTypeDefinition - DeleteQueueRequest: MessageTypeDefinition - DeleteQueueResponse: MessageTypeDefinition - FilaAdmin: SubtypeConstructor & { service: _fila_v1_FilaAdminDefinition } - GetAclRequest: MessageTypeDefinition - GetAclResponse: MessageTypeDefinition - GetConfigRequest: MessageTypeDefinition - GetConfigResponse: MessageTypeDefinition - GetStatsRequest: MessageTypeDefinition - GetStatsResponse: MessageTypeDefinition - ListApiKeysRequest: MessageTypeDefinition - ListApiKeysResponse: MessageTypeDefinition - ListConfigRequest: MessageTypeDefinition - ListConfigResponse: MessageTypeDefinition - ListQueuesRequest: MessageTypeDefinition - ListQueuesResponse: MessageTypeDefinition - PerFairnessKeyStats: MessageTypeDefinition 
- PerThrottleKeyStats: MessageTypeDefinition - QueueConfig: MessageTypeDefinition - QueueInfo: MessageTypeDefinition - RedriveRequest: MessageTypeDefinition - RedriveResponse: MessageTypeDefinition - RevokeApiKeyRequest: MessageTypeDefinition - RevokeApiKeyResponse: MessageTypeDefinition - SetAclRequest: MessageTypeDefinition - SetAclResponse: MessageTypeDefinition - SetConfigRequest: MessageTypeDefinition - SetConfigResponse: MessageTypeDefinition - } - } -} - diff --git a/generated/fila/v1/AckError.ts b/generated/fila/v1/AckError.ts deleted file mode 100644 index bb2f56b..0000000 --- a/generated/fila/v1/AckError.ts +++ /dev/null @@ -1,13 +0,0 @@ -// Original file: proto/fila/v1/service.proto - -import type { AckErrorCode as _fila_v1_AckErrorCode, AckErrorCode__Output as _fila_v1_AckErrorCode__Output } from '../../fila/v1/AckErrorCode'; - -export interface AckError { - 'code'?: (_fila_v1_AckErrorCode); - 'message'?: (string); -} - -export interface AckError__Output { - 'code': (_fila_v1_AckErrorCode__Output); - 'message': (string); -} diff --git a/generated/fila/v1/AckErrorCode.ts b/generated/fila/v1/AckErrorCode.ts deleted file mode 100644 index 04a2113..0000000 --- a/generated/fila/v1/AckErrorCode.ts +++ /dev/null @@ -1,20 +0,0 @@ -// Original file: proto/fila/v1/service.proto - -export const AckErrorCode = { - ACK_ERROR_CODE_UNSPECIFIED: 'ACK_ERROR_CODE_UNSPECIFIED', - ACK_ERROR_CODE_MESSAGE_NOT_FOUND: 'ACK_ERROR_CODE_MESSAGE_NOT_FOUND', - ACK_ERROR_CODE_STORAGE: 'ACK_ERROR_CODE_STORAGE', - ACK_ERROR_CODE_PERMISSION_DENIED: 'ACK_ERROR_CODE_PERMISSION_DENIED', -} as const; - -export type AckErrorCode = - | 'ACK_ERROR_CODE_UNSPECIFIED' - | 0 - | 'ACK_ERROR_CODE_MESSAGE_NOT_FOUND' - | 1 - | 'ACK_ERROR_CODE_STORAGE' - | 2 - | 'ACK_ERROR_CODE_PERMISSION_DENIED' - | 3 - -export type AckErrorCode__Output = typeof AckErrorCode[keyof typeof AckErrorCode] diff --git a/generated/fila/v1/AckMessage.ts b/generated/fila/v1/AckMessage.ts deleted file mode 100644 index 
dac5ff4..0000000 --- a/generated/fila/v1/AckMessage.ts +++ /dev/null @@ -1,12 +0,0 @@ -// Original file: proto/fila/v1/service.proto - - -export interface AckMessage { - 'queue'?: (string); - 'messageId'?: (string); -} - -export interface AckMessage__Output { - 'queue': (string); - 'messageId': (string); -} diff --git a/generated/fila/v1/AckRequest.ts b/generated/fila/v1/AckRequest.ts deleted file mode 100644 index 3c63ce1..0000000 --- a/generated/fila/v1/AckRequest.ts +++ /dev/null @@ -1,11 +0,0 @@ -// Original file: proto/fila/v1/service.proto - -import type { AckMessage as _fila_v1_AckMessage, AckMessage__Output as _fila_v1_AckMessage__Output } from '../../fila/v1/AckMessage'; - -export interface AckRequest { - 'messages'?: (_fila_v1_AckMessage)[]; -} - -export interface AckRequest__Output { - 'messages': (_fila_v1_AckMessage__Output)[]; -} diff --git a/generated/fila/v1/AckResponse.ts b/generated/fila/v1/AckResponse.ts deleted file mode 100644 index eae1240..0000000 --- a/generated/fila/v1/AckResponse.ts +++ /dev/null @@ -1,11 +0,0 @@ -// Original file: proto/fila/v1/service.proto - -import type { AckResult as _fila_v1_AckResult, AckResult__Output as _fila_v1_AckResult__Output } from '../../fila/v1/AckResult'; - -export interface AckResponse { - 'results'?: (_fila_v1_AckResult)[]; -} - -export interface AckResponse__Output { - 'results': (_fila_v1_AckResult__Output)[]; -} diff --git a/generated/fila/v1/AckResult.ts b/generated/fila/v1/AckResult.ts deleted file mode 100644 index d503b9a..0000000 --- a/generated/fila/v1/AckResult.ts +++ /dev/null @@ -1,16 +0,0 @@ -// Original file: proto/fila/v1/service.proto - -import type { AckSuccess as _fila_v1_AckSuccess, AckSuccess__Output as _fila_v1_AckSuccess__Output } from '../../fila/v1/AckSuccess'; -import type { AckError as _fila_v1_AckError, AckError__Output as _fila_v1_AckError__Output } from '../../fila/v1/AckError'; - -export interface AckResult { - 'success'?: (_fila_v1_AckSuccess | null); - 'error'?: 
(_fila_v1_AckError | null); - 'result'?: "success"|"error"; -} - -export interface AckResult__Output { - 'success'?: (_fila_v1_AckSuccess__Output | null); - 'error'?: (_fila_v1_AckError__Output | null); - 'result'?: "success"|"error"; -} diff --git a/generated/fila/v1/AckSuccess.ts b/generated/fila/v1/AckSuccess.ts deleted file mode 100644 index 673d632..0000000 --- a/generated/fila/v1/AckSuccess.ts +++ /dev/null @@ -1,8 +0,0 @@ -// Original file: proto/fila/v1/service.proto - - -export interface AckSuccess { -} - -export interface AckSuccess__Output { -} diff --git a/generated/fila/v1/AclPermission.ts b/generated/fila/v1/AclPermission.ts deleted file mode 100644 index 3dec177..0000000 --- a/generated/fila/v1/AclPermission.ts +++ /dev/null @@ -1,12 +0,0 @@ -// Original file: proto/fila/v1/admin.proto - - -export interface AclPermission { - 'kind'?: (string); - 'pattern'?: (string); -} - -export interface AclPermission__Output { - 'kind': (string); - 'pattern': (string); -} diff --git a/generated/fila/v1/ApiKeyInfo.ts b/generated/fila/v1/ApiKeyInfo.ts deleted file mode 100644 index 0991024..0000000 --- a/generated/fila/v1/ApiKeyInfo.ts +++ /dev/null @@ -1,19 +0,0 @@ -// Original file: proto/fila/v1/admin.proto - -import type { Long } from '@grpc/proto-loader'; - -export interface ApiKeyInfo { - 'keyId'?: (string); - 'name'?: (string); - 'createdAtMs'?: (number | string | Long); - 'expiresAtMs'?: (number | string | Long); - 'isSuperadmin'?: (boolean); -} - -export interface ApiKeyInfo__Output { - 'keyId': (string); - 'name': (string); - 'createdAtMs': (string); - 'expiresAtMs': (string); - 'isSuperadmin': (boolean); -} diff --git a/generated/fila/v1/ConfigEntry.ts b/generated/fila/v1/ConfigEntry.ts deleted file mode 100644 index dedefd1..0000000 --- a/generated/fila/v1/ConfigEntry.ts +++ /dev/null @@ -1,12 +0,0 @@ -// Original file: proto/fila/v1/admin.proto - - -export interface ConfigEntry { - 'key'?: (string); - 'value'?: (string); -} - -export interface 
ConfigEntry__Output { - 'key': (string); - 'value': (string); -} diff --git a/generated/fila/v1/ConsumeRequest.ts b/generated/fila/v1/ConsumeRequest.ts deleted file mode 100644 index c163cee..0000000 --- a/generated/fila/v1/ConsumeRequest.ts +++ /dev/null @@ -1,10 +0,0 @@ -// Original file: proto/fila/v1/service.proto - - -export interface ConsumeRequest { - 'queue'?: (string); -} - -export interface ConsumeRequest__Output { - 'queue': (string); -} diff --git a/generated/fila/v1/ConsumeResponse.ts b/generated/fila/v1/ConsumeResponse.ts deleted file mode 100644 index bc1f398..0000000 --- a/generated/fila/v1/ConsumeResponse.ts +++ /dev/null @@ -1,11 +0,0 @@ -// Original file: proto/fila/v1/service.proto - -import type { Message as _fila_v1_Message, Message__Output as _fila_v1_Message__Output } from '../../fila/v1/Message'; - -export interface ConsumeResponse { - 'messages'?: (_fila_v1_Message)[]; -} - -export interface ConsumeResponse__Output { - 'messages': (_fila_v1_Message__Output)[]; -} diff --git a/generated/fila/v1/CreateApiKeyRequest.ts b/generated/fila/v1/CreateApiKeyRequest.ts deleted file mode 100644 index 8787c8e..0000000 --- a/generated/fila/v1/CreateApiKeyRequest.ts +++ /dev/null @@ -1,15 +0,0 @@ -// Original file: proto/fila/v1/admin.proto - -import type { Long } from '@grpc/proto-loader'; - -export interface CreateApiKeyRequest { - 'name'?: (string); - 'expiresAtMs'?: (number | string | Long); - 'isSuperadmin'?: (boolean); -} - -export interface CreateApiKeyRequest__Output { - 'name': (string); - 'expiresAtMs': (string); - 'isSuperadmin': (boolean); -} diff --git a/generated/fila/v1/CreateApiKeyResponse.ts b/generated/fila/v1/CreateApiKeyResponse.ts deleted file mode 100644 index a25a390..0000000 --- a/generated/fila/v1/CreateApiKeyResponse.ts +++ /dev/null @@ -1,14 +0,0 @@ -// Original file: proto/fila/v1/admin.proto - - -export interface CreateApiKeyResponse { - 'keyId'?: (string); - 'key'?: (string); - 'isSuperadmin'?: (boolean); -} - -export 
interface CreateApiKeyResponse__Output { - 'keyId': (string); - 'key': (string); - 'isSuperadmin': (boolean); -} diff --git a/generated/fila/v1/CreateQueueRequest.ts b/generated/fila/v1/CreateQueueRequest.ts deleted file mode 100644 index 10e1367..0000000 --- a/generated/fila/v1/CreateQueueRequest.ts +++ /dev/null @@ -1,13 +0,0 @@ -// Original file: proto/fila/v1/admin.proto - -import type { QueueConfig as _fila_v1_QueueConfig, QueueConfig__Output as _fila_v1_QueueConfig__Output } from '../../fila/v1/QueueConfig'; - -export interface CreateQueueRequest { - 'name'?: (string); - 'config'?: (_fila_v1_QueueConfig | null); -} - -export interface CreateQueueRequest__Output { - 'name': (string); - 'config': (_fila_v1_QueueConfig__Output | null); -} diff --git a/generated/fila/v1/CreateQueueResponse.ts b/generated/fila/v1/CreateQueueResponse.ts deleted file mode 100644 index 515b5bc..0000000 --- a/generated/fila/v1/CreateQueueResponse.ts +++ /dev/null @@ -1,10 +0,0 @@ -// Original file: proto/fila/v1/admin.proto - - -export interface CreateQueueResponse { - 'queueId'?: (string); -} - -export interface CreateQueueResponse__Output { - 'queueId': (string); -} diff --git a/generated/fila/v1/DeleteQueueRequest.ts b/generated/fila/v1/DeleteQueueRequest.ts deleted file mode 100644 index f37e736..0000000 --- a/generated/fila/v1/DeleteQueueRequest.ts +++ /dev/null @@ -1,10 +0,0 @@ -// Original file: proto/fila/v1/admin.proto - - -export interface DeleteQueueRequest { - 'queue'?: (string); -} - -export interface DeleteQueueRequest__Output { - 'queue': (string); -} diff --git a/generated/fila/v1/DeleteQueueResponse.ts b/generated/fila/v1/DeleteQueueResponse.ts deleted file mode 100644 index 389d1d0..0000000 --- a/generated/fila/v1/DeleteQueueResponse.ts +++ /dev/null @@ -1,8 +0,0 @@ -// Original file: proto/fila/v1/admin.proto - - -export interface DeleteQueueResponse { -} - -export interface DeleteQueueResponse__Output { -} diff --git a/generated/fila/v1/EnqueueError.ts 
b/generated/fila/v1/EnqueueError.ts deleted file mode 100644 index 82fe40e..0000000 --- a/generated/fila/v1/EnqueueError.ts +++ /dev/null @@ -1,13 +0,0 @@ -// Original file: proto/fila/v1/service.proto - -import type { EnqueueErrorCode as _fila_v1_EnqueueErrorCode, EnqueueErrorCode__Output as _fila_v1_EnqueueErrorCode__Output } from '../../fila/v1/EnqueueErrorCode'; - -export interface EnqueueError { - 'code'?: (_fila_v1_EnqueueErrorCode); - 'message'?: (string); -} - -export interface EnqueueError__Output { - 'code': (_fila_v1_EnqueueErrorCode__Output); - 'message': (string); -} diff --git a/generated/fila/v1/EnqueueErrorCode.ts b/generated/fila/v1/EnqueueErrorCode.ts deleted file mode 100644 index cc0f38d..0000000 --- a/generated/fila/v1/EnqueueErrorCode.ts +++ /dev/null @@ -1,23 +0,0 @@ -// Original file: proto/fila/v1/service.proto - -export const EnqueueErrorCode = { - ENQUEUE_ERROR_CODE_UNSPECIFIED: 'ENQUEUE_ERROR_CODE_UNSPECIFIED', - ENQUEUE_ERROR_CODE_QUEUE_NOT_FOUND: 'ENQUEUE_ERROR_CODE_QUEUE_NOT_FOUND', - ENQUEUE_ERROR_CODE_STORAGE: 'ENQUEUE_ERROR_CODE_STORAGE', - ENQUEUE_ERROR_CODE_LUA: 'ENQUEUE_ERROR_CODE_LUA', - ENQUEUE_ERROR_CODE_PERMISSION_DENIED: 'ENQUEUE_ERROR_CODE_PERMISSION_DENIED', -} as const; - -export type EnqueueErrorCode = - | 'ENQUEUE_ERROR_CODE_UNSPECIFIED' - | 0 - | 'ENQUEUE_ERROR_CODE_QUEUE_NOT_FOUND' - | 1 - | 'ENQUEUE_ERROR_CODE_STORAGE' - | 2 - | 'ENQUEUE_ERROR_CODE_LUA' - | 3 - | 'ENQUEUE_ERROR_CODE_PERMISSION_DENIED' - | 4 - -export type EnqueueErrorCode__Output = typeof EnqueueErrorCode[keyof typeof EnqueueErrorCode] diff --git a/generated/fila/v1/EnqueueMessage.ts b/generated/fila/v1/EnqueueMessage.ts deleted file mode 100644 index 04b917d..0000000 --- a/generated/fila/v1/EnqueueMessage.ts +++ /dev/null @@ -1,14 +0,0 @@ -// Original file: proto/fila/v1/service.proto - - -export interface EnqueueMessage { - 'queue'?: (string); - 'headers'?: ({[key: string]: string}); - 'payload'?: (Buffer | Uint8Array | string); -} - -export 
interface EnqueueMessage__Output { - 'queue': (string); - 'headers': ({[key: string]: string}); - 'payload': (Buffer); -} diff --git a/generated/fila/v1/EnqueueRequest.ts b/generated/fila/v1/EnqueueRequest.ts deleted file mode 100644 index e6f0c8e..0000000 --- a/generated/fila/v1/EnqueueRequest.ts +++ /dev/null @@ -1,11 +0,0 @@ -// Original file: proto/fila/v1/service.proto - -import type { EnqueueMessage as _fila_v1_EnqueueMessage, EnqueueMessage__Output as _fila_v1_EnqueueMessage__Output } from '../../fila/v1/EnqueueMessage'; - -export interface EnqueueRequest { - 'messages'?: (_fila_v1_EnqueueMessage)[]; -} - -export interface EnqueueRequest__Output { - 'messages': (_fila_v1_EnqueueMessage__Output)[]; -} diff --git a/generated/fila/v1/EnqueueResponse.ts b/generated/fila/v1/EnqueueResponse.ts deleted file mode 100644 index 271c440..0000000 --- a/generated/fila/v1/EnqueueResponse.ts +++ /dev/null @@ -1,11 +0,0 @@ -// Original file: proto/fila/v1/service.proto - -import type { EnqueueResult as _fila_v1_EnqueueResult, EnqueueResult__Output as _fila_v1_EnqueueResult__Output } from '../../fila/v1/EnqueueResult'; - -export interface EnqueueResponse { - 'results'?: (_fila_v1_EnqueueResult)[]; -} - -export interface EnqueueResponse__Output { - 'results': (_fila_v1_EnqueueResult__Output)[]; -} diff --git a/generated/fila/v1/EnqueueResult.ts b/generated/fila/v1/EnqueueResult.ts deleted file mode 100644 index 3691008..0000000 --- a/generated/fila/v1/EnqueueResult.ts +++ /dev/null @@ -1,15 +0,0 @@ -// Original file: proto/fila/v1/service.proto - -import type { EnqueueError as _fila_v1_EnqueueError, EnqueueError__Output as _fila_v1_EnqueueError__Output } from '../../fila/v1/EnqueueError'; - -export interface EnqueueResult { - 'messageId'?: (string); - 'error'?: (_fila_v1_EnqueueError | null); - 'result'?: "messageId"|"error"; -} - -export interface EnqueueResult__Output { - 'messageId'?: (string); - 'error'?: (_fila_v1_EnqueueError__Output | null); - 'result'?: 
"messageId"|"error"; -} diff --git a/generated/fila/v1/FilaAdmin.ts b/generated/fila/v1/FilaAdmin.ts deleted file mode 100644 index 977f024..0000000 --- a/generated/fila/v1/FilaAdmin.ts +++ /dev/null @@ -1,195 +0,0 @@ -// Original file: proto/fila/v1/admin.proto - -import type * as grpc from '@grpc/grpc-js' -import type { MethodDefinition } from '@grpc/proto-loader' -import type { CreateApiKeyRequest as _fila_v1_CreateApiKeyRequest, CreateApiKeyRequest__Output as _fila_v1_CreateApiKeyRequest__Output } from '../../fila/v1/CreateApiKeyRequest'; -import type { CreateApiKeyResponse as _fila_v1_CreateApiKeyResponse, CreateApiKeyResponse__Output as _fila_v1_CreateApiKeyResponse__Output } from '../../fila/v1/CreateApiKeyResponse'; -import type { CreateQueueRequest as _fila_v1_CreateQueueRequest, CreateQueueRequest__Output as _fila_v1_CreateQueueRequest__Output } from '../../fila/v1/CreateQueueRequest'; -import type { CreateQueueResponse as _fila_v1_CreateQueueResponse, CreateQueueResponse__Output as _fila_v1_CreateQueueResponse__Output } from '../../fila/v1/CreateQueueResponse'; -import type { DeleteQueueRequest as _fila_v1_DeleteQueueRequest, DeleteQueueRequest__Output as _fila_v1_DeleteQueueRequest__Output } from '../../fila/v1/DeleteQueueRequest'; -import type { DeleteQueueResponse as _fila_v1_DeleteQueueResponse, DeleteQueueResponse__Output as _fila_v1_DeleteQueueResponse__Output } from '../../fila/v1/DeleteQueueResponse'; -import type { GetAclRequest as _fila_v1_GetAclRequest, GetAclRequest__Output as _fila_v1_GetAclRequest__Output } from '../../fila/v1/GetAclRequest'; -import type { GetAclResponse as _fila_v1_GetAclResponse, GetAclResponse__Output as _fila_v1_GetAclResponse__Output } from '../../fila/v1/GetAclResponse'; -import type { GetConfigRequest as _fila_v1_GetConfigRequest, GetConfigRequest__Output as _fila_v1_GetConfigRequest__Output } from '../../fila/v1/GetConfigRequest'; -import type { GetConfigResponse as _fila_v1_GetConfigResponse, 
GetConfigResponse__Output as _fila_v1_GetConfigResponse__Output } from '../../fila/v1/GetConfigResponse'; -import type { GetStatsRequest as _fila_v1_GetStatsRequest, GetStatsRequest__Output as _fila_v1_GetStatsRequest__Output } from '../../fila/v1/GetStatsRequest'; -import type { GetStatsResponse as _fila_v1_GetStatsResponse, GetStatsResponse__Output as _fila_v1_GetStatsResponse__Output } from '../../fila/v1/GetStatsResponse'; -import type { ListApiKeysRequest as _fila_v1_ListApiKeysRequest, ListApiKeysRequest__Output as _fila_v1_ListApiKeysRequest__Output } from '../../fila/v1/ListApiKeysRequest'; -import type { ListApiKeysResponse as _fila_v1_ListApiKeysResponse, ListApiKeysResponse__Output as _fila_v1_ListApiKeysResponse__Output } from '../../fila/v1/ListApiKeysResponse'; -import type { ListConfigRequest as _fila_v1_ListConfigRequest, ListConfigRequest__Output as _fila_v1_ListConfigRequest__Output } from '../../fila/v1/ListConfigRequest'; -import type { ListConfigResponse as _fila_v1_ListConfigResponse, ListConfigResponse__Output as _fila_v1_ListConfigResponse__Output } from '../../fila/v1/ListConfigResponse'; -import type { ListQueuesRequest as _fila_v1_ListQueuesRequest, ListQueuesRequest__Output as _fila_v1_ListQueuesRequest__Output } from '../../fila/v1/ListQueuesRequest'; -import type { ListQueuesResponse as _fila_v1_ListQueuesResponse, ListQueuesResponse__Output as _fila_v1_ListQueuesResponse__Output } from '../../fila/v1/ListQueuesResponse'; -import type { RedriveRequest as _fila_v1_RedriveRequest, RedriveRequest__Output as _fila_v1_RedriveRequest__Output } from '../../fila/v1/RedriveRequest'; -import type { RedriveResponse as _fila_v1_RedriveResponse, RedriveResponse__Output as _fila_v1_RedriveResponse__Output } from '../../fila/v1/RedriveResponse'; -import type { RevokeApiKeyRequest as _fila_v1_RevokeApiKeyRequest, RevokeApiKeyRequest__Output as _fila_v1_RevokeApiKeyRequest__Output } from '../../fila/v1/RevokeApiKeyRequest'; -import type { 
RevokeApiKeyResponse as _fila_v1_RevokeApiKeyResponse, RevokeApiKeyResponse__Output as _fila_v1_RevokeApiKeyResponse__Output } from '../../fila/v1/RevokeApiKeyResponse'; -import type { SetAclRequest as _fila_v1_SetAclRequest, SetAclRequest__Output as _fila_v1_SetAclRequest__Output } from '../../fila/v1/SetAclRequest'; -import type { SetAclResponse as _fila_v1_SetAclResponse, SetAclResponse__Output as _fila_v1_SetAclResponse__Output } from '../../fila/v1/SetAclResponse'; -import type { SetConfigRequest as _fila_v1_SetConfigRequest, SetConfigRequest__Output as _fila_v1_SetConfigRequest__Output } from '../../fila/v1/SetConfigRequest'; -import type { SetConfigResponse as _fila_v1_SetConfigResponse, SetConfigResponse__Output as _fila_v1_SetConfigResponse__Output } from '../../fila/v1/SetConfigResponse'; - -export interface FilaAdminClient extends grpc.Client { - CreateApiKey(argument: _fila_v1_CreateApiKeyRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_CreateApiKeyResponse__Output>): grpc.ClientUnaryCall; - CreateApiKey(argument: _fila_v1_CreateApiKeyRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_fila_v1_CreateApiKeyResponse__Output>): grpc.ClientUnaryCall; - CreateApiKey(argument: _fila_v1_CreateApiKeyRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_CreateApiKeyResponse__Output>): grpc.ClientUnaryCall; - CreateApiKey(argument: _fila_v1_CreateApiKeyRequest, callback: grpc.requestCallback<_fila_v1_CreateApiKeyResponse__Output>): grpc.ClientUnaryCall; - createApiKey(argument: _fila_v1_CreateApiKeyRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_CreateApiKeyResponse__Output>): grpc.ClientUnaryCall; - createApiKey(argument: _fila_v1_CreateApiKeyRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_fila_v1_CreateApiKeyResponse__Output>): grpc.ClientUnaryCall; - createApiKey(argument: _fila_v1_CreateApiKeyRequest, 
options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_CreateApiKeyResponse__Output>): grpc.ClientUnaryCall; - createApiKey(argument: _fila_v1_CreateApiKeyRequest, callback: grpc.requestCallback<_fila_v1_CreateApiKeyResponse__Output>): grpc.ClientUnaryCall; - - CreateQueue(argument: _fila_v1_CreateQueueRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_CreateQueueResponse__Output>): grpc.ClientUnaryCall; - CreateQueue(argument: _fila_v1_CreateQueueRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_fila_v1_CreateQueueResponse__Output>): grpc.ClientUnaryCall; - CreateQueue(argument: _fila_v1_CreateQueueRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_CreateQueueResponse__Output>): grpc.ClientUnaryCall; - CreateQueue(argument: _fila_v1_CreateQueueRequest, callback: grpc.requestCallback<_fila_v1_CreateQueueResponse__Output>): grpc.ClientUnaryCall; - createQueue(argument: _fila_v1_CreateQueueRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_CreateQueueResponse__Output>): grpc.ClientUnaryCall; - createQueue(argument: _fila_v1_CreateQueueRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_fila_v1_CreateQueueResponse__Output>): grpc.ClientUnaryCall; - createQueue(argument: _fila_v1_CreateQueueRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_CreateQueueResponse__Output>): grpc.ClientUnaryCall; - createQueue(argument: _fila_v1_CreateQueueRequest, callback: grpc.requestCallback<_fila_v1_CreateQueueResponse__Output>): grpc.ClientUnaryCall; - - DeleteQueue(argument: _fila_v1_DeleteQueueRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_DeleteQueueResponse__Output>): grpc.ClientUnaryCall; - DeleteQueue(argument: _fila_v1_DeleteQueueRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_fila_v1_DeleteQueueResponse__Output>): 
grpc.ClientUnaryCall; - DeleteQueue(argument: _fila_v1_DeleteQueueRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_DeleteQueueResponse__Output>): grpc.ClientUnaryCall; - DeleteQueue(argument: _fila_v1_DeleteQueueRequest, callback: grpc.requestCallback<_fila_v1_DeleteQueueResponse__Output>): grpc.ClientUnaryCall; - deleteQueue(argument: _fila_v1_DeleteQueueRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_DeleteQueueResponse__Output>): grpc.ClientUnaryCall; - deleteQueue(argument: _fila_v1_DeleteQueueRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_fila_v1_DeleteQueueResponse__Output>): grpc.ClientUnaryCall; - deleteQueue(argument: _fila_v1_DeleteQueueRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_DeleteQueueResponse__Output>): grpc.ClientUnaryCall; - deleteQueue(argument: _fila_v1_DeleteQueueRequest, callback: grpc.requestCallback<_fila_v1_DeleteQueueResponse__Output>): grpc.ClientUnaryCall; - - GetAcl(argument: _fila_v1_GetAclRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_GetAclResponse__Output>): grpc.ClientUnaryCall; - GetAcl(argument: _fila_v1_GetAclRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_fila_v1_GetAclResponse__Output>): grpc.ClientUnaryCall; - GetAcl(argument: _fila_v1_GetAclRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_GetAclResponse__Output>): grpc.ClientUnaryCall; - GetAcl(argument: _fila_v1_GetAclRequest, callback: grpc.requestCallback<_fila_v1_GetAclResponse__Output>): grpc.ClientUnaryCall; - getAcl(argument: _fila_v1_GetAclRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_GetAclResponse__Output>): grpc.ClientUnaryCall; - getAcl(argument: _fila_v1_GetAclRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_fila_v1_GetAclResponse__Output>): grpc.ClientUnaryCall; - 
getAcl(argument: _fila_v1_GetAclRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_GetAclResponse__Output>): grpc.ClientUnaryCall; - getAcl(argument: _fila_v1_GetAclRequest, callback: grpc.requestCallback<_fila_v1_GetAclResponse__Output>): grpc.ClientUnaryCall; - - GetConfig(argument: _fila_v1_GetConfigRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_GetConfigResponse__Output>): grpc.ClientUnaryCall; - GetConfig(argument: _fila_v1_GetConfigRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_fila_v1_GetConfigResponse__Output>): grpc.ClientUnaryCall; - GetConfig(argument: _fila_v1_GetConfigRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_GetConfigResponse__Output>): grpc.ClientUnaryCall; - GetConfig(argument: _fila_v1_GetConfigRequest, callback: grpc.requestCallback<_fila_v1_GetConfigResponse__Output>): grpc.ClientUnaryCall; - getConfig(argument: _fila_v1_GetConfigRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_GetConfigResponse__Output>): grpc.ClientUnaryCall; - getConfig(argument: _fila_v1_GetConfigRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_fila_v1_GetConfigResponse__Output>): grpc.ClientUnaryCall; - getConfig(argument: _fila_v1_GetConfigRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_GetConfigResponse__Output>): grpc.ClientUnaryCall; - getConfig(argument: _fila_v1_GetConfigRequest, callback: grpc.requestCallback<_fila_v1_GetConfigResponse__Output>): grpc.ClientUnaryCall; - - GetStats(argument: _fila_v1_GetStatsRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_GetStatsResponse__Output>): grpc.ClientUnaryCall; - GetStats(argument: _fila_v1_GetStatsRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_fila_v1_GetStatsResponse__Output>): grpc.ClientUnaryCall; - GetStats(argument: 
_fila_v1_GetStatsRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_GetStatsResponse__Output>): grpc.ClientUnaryCall; - GetStats(argument: _fila_v1_GetStatsRequest, callback: grpc.requestCallback<_fila_v1_GetStatsResponse__Output>): grpc.ClientUnaryCall; - getStats(argument: _fila_v1_GetStatsRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_GetStatsResponse__Output>): grpc.ClientUnaryCall; - getStats(argument: _fila_v1_GetStatsRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_fila_v1_GetStatsResponse__Output>): grpc.ClientUnaryCall; - getStats(argument: _fila_v1_GetStatsRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_GetStatsResponse__Output>): grpc.ClientUnaryCall; - getStats(argument: _fila_v1_GetStatsRequest, callback: grpc.requestCallback<_fila_v1_GetStatsResponse__Output>): grpc.ClientUnaryCall; - - ListApiKeys(argument: _fila_v1_ListApiKeysRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_ListApiKeysResponse__Output>): grpc.ClientUnaryCall; - ListApiKeys(argument: _fila_v1_ListApiKeysRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_fila_v1_ListApiKeysResponse__Output>): grpc.ClientUnaryCall; - ListApiKeys(argument: _fila_v1_ListApiKeysRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_ListApiKeysResponse__Output>): grpc.ClientUnaryCall; - ListApiKeys(argument: _fila_v1_ListApiKeysRequest, callback: grpc.requestCallback<_fila_v1_ListApiKeysResponse__Output>): grpc.ClientUnaryCall; - listApiKeys(argument: _fila_v1_ListApiKeysRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_ListApiKeysResponse__Output>): grpc.ClientUnaryCall; - listApiKeys(argument: _fila_v1_ListApiKeysRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_fila_v1_ListApiKeysResponse__Output>): grpc.ClientUnaryCall; - 
listApiKeys(argument: _fila_v1_ListApiKeysRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_ListApiKeysResponse__Output>): grpc.ClientUnaryCall; - listApiKeys(argument: _fila_v1_ListApiKeysRequest, callback: grpc.requestCallback<_fila_v1_ListApiKeysResponse__Output>): grpc.ClientUnaryCall; - - ListConfig(argument: _fila_v1_ListConfigRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_ListConfigResponse__Output>): grpc.ClientUnaryCall; - ListConfig(argument: _fila_v1_ListConfigRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_fila_v1_ListConfigResponse__Output>): grpc.ClientUnaryCall; - ListConfig(argument: _fila_v1_ListConfigRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_ListConfigResponse__Output>): grpc.ClientUnaryCall; - ListConfig(argument: _fila_v1_ListConfigRequest, callback: grpc.requestCallback<_fila_v1_ListConfigResponse__Output>): grpc.ClientUnaryCall; - listConfig(argument: _fila_v1_ListConfigRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_ListConfigResponse__Output>): grpc.ClientUnaryCall; - listConfig(argument: _fila_v1_ListConfigRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_fila_v1_ListConfigResponse__Output>): grpc.ClientUnaryCall; - listConfig(argument: _fila_v1_ListConfigRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_ListConfigResponse__Output>): grpc.ClientUnaryCall; - listConfig(argument: _fila_v1_ListConfigRequest, callback: grpc.requestCallback<_fila_v1_ListConfigResponse__Output>): grpc.ClientUnaryCall; - - ListQueues(argument: _fila_v1_ListQueuesRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_ListQueuesResponse__Output>): grpc.ClientUnaryCall; - ListQueues(argument: _fila_v1_ListQueuesRequest, metadata: grpc.Metadata, callback: 
grpc.requestCallback<_fila_v1_ListQueuesResponse__Output>): grpc.ClientUnaryCall; - ListQueues(argument: _fila_v1_ListQueuesRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_ListQueuesResponse__Output>): grpc.ClientUnaryCall; - ListQueues(argument: _fila_v1_ListQueuesRequest, callback: grpc.requestCallback<_fila_v1_ListQueuesResponse__Output>): grpc.ClientUnaryCall; - listQueues(argument: _fila_v1_ListQueuesRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_ListQueuesResponse__Output>): grpc.ClientUnaryCall; - listQueues(argument: _fila_v1_ListQueuesRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_fila_v1_ListQueuesResponse__Output>): grpc.ClientUnaryCall; - listQueues(argument: _fila_v1_ListQueuesRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_ListQueuesResponse__Output>): grpc.ClientUnaryCall; - listQueues(argument: _fila_v1_ListQueuesRequest, callback: grpc.requestCallback<_fila_v1_ListQueuesResponse__Output>): grpc.ClientUnaryCall; - - Redrive(argument: _fila_v1_RedriveRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_RedriveResponse__Output>): grpc.ClientUnaryCall; - Redrive(argument: _fila_v1_RedriveRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_fila_v1_RedriveResponse__Output>): grpc.ClientUnaryCall; - Redrive(argument: _fila_v1_RedriveRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_RedriveResponse__Output>): grpc.ClientUnaryCall; - Redrive(argument: _fila_v1_RedriveRequest, callback: grpc.requestCallback<_fila_v1_RedriveResponse__Output>): grpc.ClientUnaryCall; - redrive(argument: _fila_v1_RedriveRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_RedriveResponse__Output>): grpc.ClientUnaryCall; - redrive(argument: _fila_v1_RedriveRequest, metadata: grpc.Metadata, callback: 
grpc.requestCallback<_fila_v1_RedriveResponse__Output>): grpc.ClientUnaryCall; - redrive(argument: _fila_v1_RedriveRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_RedriveResponse__Output>): grpc.ClientUnaryCall; - redrive(argument: _fila_v1_RedriveRequest, callback: grpc.requestCallback<_fila_v1_RedriveResponse__Output>): grpc.ClientUnaryCall; - - RevokeApiKey(argument: _fila_v1_RevokeApiKeyRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_RevokeApiKeyResponse__Output>): grpc.ClientUnaryCall; - RevokeApiKey(argument: _fila_v1_RevokeApiKeyRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_fila_v1_RevokeApiKeyResponse__Output>): grpc.ClientUnaryCall; - RevokeApiKey(argument: _fila_v1_RevokeApiKeyRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_RevokeApiKeyResponse__Output>): grpc.ClientUnaryCall; - RevokeApiKey(argument: _fila_v1_RevokeApiKeyRequest, callback: grpc.requestCallback<_fila_v1_RevokeApiKeyResponse__Output>): grpc.ClientUnaryCall; - revokeApiKey(argument: _fila_v1_RevokeApiKeyRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_RevokeApiKeyResponse__Output>): grpc.ClientUnaryCall; - revokeApiKey(argument: _fila_v1_RevokeApiKeyRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_fila_v1_RevokeApiKeyResponse__Output>): grpc.ClientUnaryCall; - revokeApiKey(argument: _fila_v1_RevokeApiKeyRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_RevokeApiKeyResponse__Output>): grpc.ClientUnaryCall; - revokeApiKey(argument: _fila_v1_RevokeApiKeyRequest, callback: grpc.requestCallback<_fila_v1_RevokeApiKeyResponse__Output>): grpc.ClientUnaryCall; - - SetAcl(argument: _fila_v1_SetAclRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_SetAclResponse__Output>): grpc.ClientUnaryCall; - SetAcl(argument: 
_fila_v1_SetAclRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_fila_v1_SetAclResponse__Output>): grpc.ClientUnaryCall; - SetAcl(argument: _fila_v1_SetAclRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_SetAclResponse__Output>): grpc.ClientUnaryCall; - SetAcl(argument: _fila_v1_SetAclRequest, callback: grpc.requestCallback<_fila_v1_SetAclResponse__Output>): grpc.ClientUnaryCall; - setAcl(argument: _fila_v1_SetAclRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_SetAclResponse__Output>): grpc.ClientUnaryCall; - setAcl(argument: _fila_v1_SetAclRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_fila_v1_SetAclResponse__Output>): grpc.ClientUnaryCall; - setAcl(argument: _fila_v1_SetAclRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_SetAclResponse__Output>): grpc.ClientUnaryCall; - setAcl(argument: _fila_v1_SetAclRequest, callback: grpc.requestCallback<_fila_v1_SetAclResponse__Output>): grpc.ClientUnaryCall; - - SetConfig(argument: _fila_v1_SetConfigRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_SetConfigResponse__Output>): grpc.ClientUnaryCall; - SetConfig(argument: _fila_v1_SetConfigRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_fila_v1_SetConfigResponse__Output>): grpc.ClientUnaryCall; - SetConfig(argument: _fila_v1_SetConfigRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_SetConfigResponse__Output>): grpc.ClientUnaryCall; - SetConfig(argument: _fila_v1_SetConfigRequest, callback: grpc.requestCallback<_fila_v1_SetConfigResponse__Output>): grpc.ClientUnaryCall; - setConfig(argument: _fila_v1_SetConfigRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_SetConfigResponse__Output>): grpc.ClientUnaryCall; - setConfig(argument: _fila_v1_SetConfigRequest, metadata: grpc.Metadata, callback: 
grpc.requestCallback<_fila_v1_SetConfigResponse__Output>): grpc.ClientUnaryCall; - setConfig(argument: _fila_v1_SetConfigRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_SetConfigResponse__Output>): grpc.ClientUnaryCall; - setConfig(argument: _fila_v1_SetConfigRequest, callback: grpc.requestCallback<_fila_v1_SetConfigResponse__Output>): grpc.ClientUnaryCall; - -} - -export interface FilaAdminHandlers extends grpc.UntypedServiceImplementation { - CreateApiKey: grpc.handleUnaryCall<_fila_v1_CreateApiKeyRequest__Output, _fila_v1_CreateApiKeyResponse>; - - CreateQueue: grpc.handleUnaryCall<_fila_v1_CreateQueueRequest__Output, _fila_v1_CreateQueueResponse>; - - DeleteQueue: grpc.handleUnaryCall<_fila_v1_DeleteQueueRequest__Output, _fila_v1_DeleteQueueResponse>; - - GetAcl: grpc.handleUnaryCall<_fila_v1_GetAclRequest__Output, _fila_v1_GetAclResponse>; - - GetConfig: grpc.handleUnaryCall<_fila_v1_GetConfigRequest__Output, _fila_v1_GetConfigResponse>; - - GetStats: grpc.handleUnaryCall<_fila_v1_GetStatsRequest__Output, _fila_v1_GetStatsResponse>; - - ListApiKeys: grpc.handleUnaryCall<_fila_v1_ListApiKeysRequest__Output, _fila_v1_ListApiKeysResponse>; - - ListConfig: grpc.handleUnaryCall<_fila_v1_ListConfigRequest__Output, _fila_v1_ListConfigResponse>; - - ListQueues: grpc.handleUnaryCall<_fila_v1_ListQueuesRequest__Output, _fila_v1_ListQueuesResponse>; - - Redrive: grpc.handleUnaryCall<_fila_v1_RedriveRequest__Output, _fila_v1_RedriveResponse>; - - RevokeApiKey: grpc.handleUnaryCall<_fila_v1_RevokeApiKeyRequest__Output, _fila_v1_RevokeApiKeyResponse>; - - SetAcl: grpc.handleUnaryCall<_fila_v1_SetAclRequest__Output, _fila_v1_SetAclResponse>; - - SetConfig: grpc.handleUnaryCall<_fila_v1_SetConfigRequest__Output, _fila_v1_SetConfigResponse>; - -} - -export interface FilaAdminDefinition extends grpc.ServiceDefinition { - CreateApiKey: MethodDefinition<_fila_v1_CreateApiKeyRequest, _fila_v1_CreateApiKeyResponse, _fila_v1_CreateApiKeyRequest__Output, 
_fila_v1_CreateApiKeyResponse__Output> - CreateQueue: MethodDefinition<_fila_v1_CreateQueueRequest, _fila_v1_CreateQueueResponse, _fila_v1_CreateQueueRequest__Output, _fila_v1_CreateQueueResponse__Output> - DeleteQueue: MethodDefinition<_fila_v1_DeleteQueueRequest, _fila_v1_DeleteQueueResponse, _fila_v1_DeleteQueueRequest__Output, _fila_v1_DeleteQueueResponse__Output> - GetAcl: MethodDefinition<_fila_v1_GetAclRequest, _fila_v1_GetAclResponse, _fila_v1_GetAclRequest__Output, _fila_v1_GetAclResponse__Output> - GetConfig: MethodDefinition<_fila_v1_GetConfigRequest, _fila_v1_GetConfigResponse, _fila_v1_GetConfigRequest__Output, _fila_v1_GetConfigResponse__Output> - GetStats: MethodDefinition<_fila_v1_GetStatsRequest, _fila_v1_GetStatsResponse, _fila_v1_GetStatsRequest__Output, _fila_v1_GetStatsResponse__Output> - ListApiKeys: MethodDefinition<_fila_v1_ListApiKeysRequest, _fila_v1_ListApiKeysResponse, _fila_v1_ListApiKeysRequest__Output, _fila_v1_ListApiKeysResponse__Output> - ListConfig: MethodDefinition<_fila_v1_ListConfigRequest, _fila_v1_ListConfigResponse, _fila_v1_ListConfigRequest__Output, _fila_v1_ListConfigResponse__Output> - ListQueues: MethodDefinition<_fila_v1_ListQueuesRequest, _fila_v1_ListQueuesResponse, _fila_v1_ListQueuesRequest__Output, _fila_v1_ListQueuesResponse__Output> - Redrive: MethodDefinition<_fila_v1_RedriveRequest, _fila_v1_RedriveResponse, _fila_v1_RedriveRequest__Output, _fila_v1_RedriveResponse__Output> - RevokeApiKey: MethodDefinition<_fila_v1_RevokeApiKeyRequest, _fila_v1_RevokeApiKeyResponse, _fila_v1_RevokeApiKeyRequest__Output, _fila_v1_RevokeApiKeyResponse__Output> - SetAcl: MethodDefinition<_fila_v1_SetAclRequest, _fila_v1_SetAclResponse, _fila_v1_SetAclRequest__Output, _fila_v1_SetAclResponse__Output> - SetConfig: MethodDefinition<_fila_v1_SetConfigRequest, _fila_v1_SetConfigResponse, _fila_v1_SetConfigRequest__Output, _fila_v1_SetConfigResponse__Output> -} diff --git a/generated/fila/v1/FilaService.ts 
b/generated/fila/v1/FilaService.ts deleted file mode 100644 index 868ec1b..0000000 --- a/generated/fila/v1/FilaService.ts +++ /dev/null @@ -1,75 +0,0 @@ -// Original file: proto/fila/v1/service.proto - -import type * as grpc from '@grpc/grpc-js' -import type { MethodDefinition } from '@grpc/proto-loader' -import type { AckRequest as _fila_v1_AckRequest, AckRequest__Output as _fila_v1_AckRequest__Output } from '../../fila/v1/AckRequest'; -import type { AckResponse as _fila_v1_AckResponse, AckResponse__Output as _fila_v1_AckResponse__Output } from '../../fila/v1/AckResponse'; -import type { ConsumeRequest as _fila_v1_ConsumeRequest, ConsumeRequest__Output as _fila_v1_ConsumeRequest__Output } from '../../fila/v1/ConsumeRequest'; -import type { ConsumeResponse as _fila_v1_ConsumeResponse, ConsumeResponse__Output as _fila_v1_ConsumeResponse__Output } from '../../fila/v1/ConsumeResponse'; -import type { EnqueueRequest as _fila_v1_EnqueueRequest, EnqueueRequest__Output as _fila_v1_EnqueueRequest__Output } from '../../fila/v1/EnqueueRequest'; -import type { EnqueueResponse as _fila_v1_EnqueueResponse, EnqueueResponse__Output as _fila_v1_EnqueueResponse__Output } from '../../fila/v1/EnqueueResponse'; -import type { NackRequest as _fila_v1_NackRequest, NackRequest__Output as _fila_v1_NackRequest__Output } from '../../fila/v1/NackRequest'; -import type { NackResponse as _fila_v1_NackResponse, NackResponse__Output as _fila_v1_NackResponse__Output } from '../../fila/v1/NackResponse'; -import type { StreamEnqueueRequest as _fila_v1_StreamEnqueueRequest, StreamEnqueueRequest__Output as _fila_v1_StreamEnqueueRequest__Output } from '../../fila/v1/StreamEnqueueRequest'; -import type { StreamEnqueueResponse as _fila_v1_StreamEnqueueResponse, StreamEnqueueResponse__Output as _fila_v1_StreamEnqueueResponse__Output } from '../../fila/v1/StreamEnqueueResponse'; - -export interface FilaServiceClient extends grpc.Client { - Ack(argument: _fila_v1_AckRequest, metadata: grpc.Metadata, 
options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_AckResponse__Output>): grpc.ClientUnaryCall; - Ack(argument: _fila_v1_AckRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_fila_v1_AckResponse__Output>): grpc.ClientUnaryCall; - Ack(argument: _fila_v1_AckRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_AckResponse__Output>): grpc.ClientUnaryCall; - Ack(argument: _fila_v1_AckRequest, callback: grpc.requestCallback<_fila_v1_AckResponse__Output>): grpc.ClientUnaryCall; - ack(argument: _fila_v1_AckRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_AckResponse__Output>): grpc.ClientUnaryCall; - ack(argument: _fila_v1_AckRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_fila_v1_AckResponse__Output>): grpc.ClientUnaryCall; - ack(argument: _fila_v1_AckRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_AckResponse__Output>): grpc.ClientUnaryCall; - ack(argument: _fila_v1_AckRequest, callback: grpc.requestCallback<_fila_v1_AckResponse__Output>): grpc.ClientUnaryCall; - - Consume(argument: _fila_v1_ConsumeRequest, metadata: grpc.Metadata, options?: grpc.CallOptions): grpc.ClientReadableStream<_fila_v1_ConsumeResponse__Output>; - Consume(argument: _fila_v1_ConsumeRequest, options?: grpc.CallOptions): grpc.ClientReadableStream<_fila_v1_ConsumeResponse__Output>; - consume(argument: _fila_v1_ConsumeRequest, metadata: grpc.Metadata, options?: grpc.CallOptions): grpc.ClientReadableStream<_fila_v1_ConsumeResponse__Output>; - consume(argument: _fila_v1_ConsumeRequest, options?: grpc.CallOptions): grpc.ClientReadableStream<_fila_v1_ConsumeResponse__Output>; - - Enqueue(argument: _fila_v1_EnqueueRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_EnqueueResponse__Output>): grpc.ClientUnaryCall; - Enqueue(argument: _fila_v1_EnqueueRequest, metadata: grpc.Metadata, callback: 
grpc.requestCallback<_fila_v1_EnqueueResponse__Output>): grpc.ClientUnaryCall; - Enqueue(argument: _fila_v1_EnqueueRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_EnqueueResponse__Output>): grpc.ClientUnaryCall; - Enqueue(argument: _fila_v1_EnqueueRequest, callback: grpc.requestCallback<_fila_v1_EnqueueResponse__Output>): grpc.ClientUnaryCall; - enqueue(argument: _fila_v1_EnqueueRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_EnqueueResponse__Output>): grpc.ClientUnaryCall; - enqueue(argument: _fila_v1_EnqueueRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_fila_v1_EnqueueResponse__Output>): grpc.ClientUnaryCall; - enqueue(argument: _fila_v1_EnqueueRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_EnqueueResponse__Output>): grpc.ClientUnaryCall; - enqueue(argument: _fila_v1_EnqueueRequest, callback: grpc.requestCallback<_fila_v1_EnqueueResponse__Output>): grpc.ClientUnaryCall; - - Nack(argument: _fila_v1_NackRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_NackResponse__Output>): grpc.ClientUnaryCall; - Nack(argument: _fila_v1_NackRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_fila_v1_NackResponse__Output>): grpc.ClientUnaryCall; - Nack(argument: _fila_v1_NackRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_NackResponse__Output>): grpc.ClientUnaryCall; - Nack(argument: _fila_v1_NackRequest, callback: grpc.requestCallback<_fila_v1_NackResponse__Output>): grpc.ClientUnaryCall; - nack(argument: _fila_v1_NackRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_NackResponse__Output>): grpc.ClientUnaryCall; - nack(argument: _fila_v1_NackRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_fila_v1_NackResponse__Output>): grpc.ClientUnaryCall; - nack(argument: _fila_v1_NackRequest, options: 
grpc.CallOptions, callback: grpc.requestCallback<_fila_v1_NackResponse__Output>): grpc.ClientUnaryCall; - nack(argument: _fila_v1_NackRequest, callback: grpc.requestCallback<_fila_v1_NackResponse__Output>): grpc.ClientUnaryCall; - - StreamEnqueue(metadata: grpc.Metadata, options?: grpc.CallOptions): grpc.ClientDuplexStream<_fila_v1_StreamEnqueueRequest, _fila_v1_StreamEnqueueResponse__Output>; - StreamEnqueue(options?: grpc.CallOptions): grpc.ClientDuplexStream<_fila_v1_StreamEnqueueRequest, _fila_v1_StreamEnqueueResponse__Output>; - streamEnqueue(metadata: grpc.Metadata, options?: grpc.CallOptions): grpc.ClientDuplexStream<_fila_v1_StreamEnqueueRequest, _fila_v1_StreamEnqueueResponse__Output>; - streamEnqueue(options?: grpc.CallOptions): grpc.ClientDuplexStream<_fila_v1_StreamEnqueueRequest, _fila_v1_StreamEnqueueResponse__Output>; - -} - -export interface FilaServiceHandlers extends grpc.UntypedServiceImplementation { - Ack: grpc.handleUnaryCall<_fila_v1_AckRequest__Output, _fila_v1_AckResponse>; - - Consume: grpc.handleServerStreamingCall<_fila_v1_ConsumeRequest__Output, _fila_v1_ConsumeResponse>; - - Enqueue: grpc.handleUnaryCall<_fila_v1_EnqueueRequest__Output, _fila_v1_EnqueueResponse>; - - Nack: grpc.handleUnaryCall<_fila_v1_NackRequest__Output, _fila_v1_NackResponse>; - - StreamEnqueue: grpc.handleBidiStreamingCall<_fila_v1_StreamEnqueueRequest__Output, _fila_v1_StreamEnqueueResponse>; - -} - -export interface FilaServiceDefinition extends grpc.ServiceDefinition { - Ack: MethodDefinition<_fila_v1_AckRequest, _fila_v1_AckResponse, _fila_v1_AckRequest__Output, _fila_v1_AckResponse__Output> - Consume: MethodDefinition<_fila_v1_ConsumeRequest, _fila_v1_ConsumeResponse, _fila_v1_ConsumeRequest__Output, _fila_v1_ConsumeResponse__Output> - Enqueue: MethodDefinition<_fila_v1_EnqueueRequest, _fila_v1_EnqueueResponse, _fila_v1_EnqueueRequest__Output, _fila_v1_EnqueueResponse__Output> - Nack: MethodDefinition<_fila_v1_NackRequest, _fila_v1_NackResponse, 
_fila_v1_NackRequest__Output, _fila_v1_NackResponse__Output> - StreamEnqueue: MethodDefinition<_fila_v1_StreamEnqueueRequest, _fila_v1_StreamEnqueueResponse, _fila_v1_StreamEnqueueRequest__Output, _fila_v1_StreamEnqueueResponse__Output> -} diff --git a/generated/fila/v1/GetAclRequest.ts b/generated/fila/v1/GetAclRequest.ts deleted file mode 100644 index 4320684..0000000 --- a/generated/fila/v1/GetAclRequest.ts +++ /dev/null @@ -1,10 +0,0 @@ -// Original file: proto/fila/v1/admin.proto - - -export interface GetAclRequest { - 'keyId'?: (string); -} - -export interface GetAclRequest__Output { - 'keyId': (string); -} diff --git a/generated/fila/v1/GetAclResponse.ts b/generated/fila/v1/GetAclResponse.ts deleted file mode 100644 index 71b5ecd..0000000 --- a/generated/fila/v1/GetAclResponse.ts +++ /dev/null @@ -1,15 +0,0 @@ -// Original file: proto/fila/v1/admin.proto - -import type { AclPermission as _fila_v1_AclPermission, AclPermission__Output as _fila_v1_AclPermission__Output } from '../../fila/v1/AclPermission'; - -export interface GetAclResponse { - 'keyId'?: (string); - 'permissions'?: (_fila_v1_AclPermission)[]; - 'isSuperadmin'?: (boolean); -} - -export interface GetAclResponse__Output { - 'keyId': (string); - 'permissions': (_fila_v1_AclPermission__Output)[]; - 'isSuperadmin': (boolean); -} diff --git a/generated/fila/v1/GetConfigRequest.ts b/generated/fila/v1/GetConfigRequest.ts deleted file mode 100644 index b551da1..0000000 --- a/generated/fila/v1/GetConfigRequest.ts +++ /dev/null @@ -1,10 +0,0 @@ -// Original file: proto/fila/v1/admin.proto - - -export interface GetConfigRequest { - 'key'?: (string); -} - -export interface GetConfigRequest__Output { - 'key': (string); -} diff --git a/generated/fila/v1/GetConfigResponse.ts b/generated/fila/v1/GetConfigResponse.ts deleted file mode 100644 index bbe78e0..0000000 --- a/generated/fila/v1/GetConfigResponse.ts +++ /dev/null @@ -1,10 +0,0 @@ -// Original file: proto/fila/v1/admin.proto - - -export interface 
GetConfigResponse { - 'value'?: (string); -} - -export interface GetConfigResponse__Output { - 'value': (string); -} diff --git a/generated/fila/v1/GetStatsRequest.ts b/generated/fila/v1/GetStatsRequest.ts deleted file mode 100644 index 651ae87..0000000 --- a/generated/fila/v1/GetStatsRequest.ts +++ /dev/null @@ -1,10 +0,0 @@ -// Original file: proto/fila/v1/admin.proto - - -export interface GetStatsRequest { - 'queue'?: (string); -} - -export interface GetStatsRequest__Output { - 'queue': (string); -} diff --git a/generated/fila/v1/GetStatsResponse.ts b/generated/fila/v1/GetStatsResponse.ts deleted file mode 100644 index 12f18e1..0000000 --- a/generated/fila/v1/GetStatsResponse.ts +++ /dev/null @@ -1,29 +0,0 @@ -// Original file: proto/fila/v1/admin.proto - -import type { PerFairnessKeyStats as _fila_v1_PerFairnessKeyStats, PerFairnessKeyStats__Output as _fila_v1_PerFairnessKeyStats__Output } from '../../fila/v1/PerFairnessKeyStats'; -import type { PerThrottleKeyStats as _fila_v1_PerThrottleKeyStats, PerThrottleKeyStats__Output as _fila_v1_PerThrottleKeyStats__Output } from '../../fila/v1/PerThrottleKeyStats'; -import type { Long } from '@grpc/proto-loader'; - -export interface GetStatsResponse { - 'depth'?: (number | string | Long); - 'inFlight'?: (number | string | Long); - 'activeFairnessKeys'?: (number | string | Long); - 'activeConsumers'?: (number); - 'quantum'?: (number); - 'perKeyStats'?: (_fila_v1_PerFairnessKeyStats)[]; - 'perThrottleStats'?: (_fila_v1_PerThrottleKeyStats)[]; - 'leaderNodeId'?: (number | string | Long); - 'replicationCount'?: (number); -} - -export interface GetStatsResponse__Output { - 'depth': (string); - 'inFlight': (string); - 'activeFairnessKeys': (string); - 'activeConsumers': (number); - 'quantum': (number); - 'perKeyStats': (_fila_v1_PerFairnessKeyStats__Output)[]; - 'perThrottleStats': (_fila_v1_PerThrottleKeyStats__Output)[]; - 'leaderNodeId': (string); - 'replicationCount': (number); -} diff --git 
a/generated/fila/v1/ListApiKeysRequest.ts b/generated/fila/v1/ListApiKeysRequest.ts deleted file mode 100644 index 157576e..0000000 --- a/generated/fila/v1/ListApiKeysRequest.ts +++ /dev/null @@ -1,8 +0,0 @@ -// Original file: proto/fila/v1/admin.proto - - -export interface ListApiKeysRequest { -} - -export interface ListApiKeysRequest__Output { -} diff --git a/generated/fila/v1/ListApiKeysResponse.ts b/generated/fila/v1/ListApiKeysResponse.ts deleted file mode 100644 index 94f2f42..0000000 --- a/generated/fila/v1/ListApiKeysResponse.ts +++ /dev/null @@ -1,11 +0,0 @@ -// Original file: proto/fila/v1/admin.proto - -import type { ApiKeyInfo as _fila_v1_ApiKeyInfo, ApiKeyInfo__Output as _fila_v1_ApiKeyInfo__Output } from '../../fila/v1/ApiKeyInfo'; - -export interface ListApiKeysResponse { - 'keys'?: (_fila_v1_ApiKeyInfo)[]; -} - -export interface ListApiKeysResponse__Output { - 'keys': (_fila_v1_ApiKeyInfo__Output)[]; -} diff --git a/generated/fila/v1/ListConfigRequest.ts b/generated/fila/v1/ListConfigRequest.ts deleted file mode 100644 index d27dc68..0000000 --- a/generated/fila/v1/ListConfigRequest.ts +++ /dev/null @@ -1,10 +0,0 @@ -// Original file: proto/fila/v1/admin.proto - - -export interface ListConfigRequest { - 'prefix'?: (string); -} - -export interface ListConfigRequest__Output { - 'prefix': (string); -} diff --git a/generated/fila/v1/ListConfigResponse.ts b/generated/fila/v1/ListConfigResponse.ts deleted file mode 100644 index b19b8c5..0000000 --- a/generated/fila/v1/ListConfigResponse.ts +++ /dev/null @@ -1,13 +0,0 @@ -// Original file: proto/fila/v1/admin.proto - -import type { ConfigEntry as _fila_v1_ConfigEntry, ConfigEntry__Output as _fila_v1_ConfigEntry__Output } from '../../fila/v1/ConfigEntry'; - -export interface ListConfigResponse { - 'entries'?: (_fila_v1_ConfigEntry)[]; - 'totalCount'?: (number); -} - -export interface ListConfigResponse__Output { - 'entries': (_fila_v1_ConfigEntry__Output)[]; - 'totalCount': (number); -} diff --git 
a/generated/fila/v1/ListQueuesRequest.ts b/generated/fila/v1/ListQueuesRequest.ts deleted file mode 100644 index d0050e1..0000000 --- a/generated/fila/v1/ListQueuesRequest.ts +++ /dev/null @@ -1,8 +0,0 @@ -// Original file: proto/fila/v1/admin.proto - - -export interface ListQueuesRequest { -} - -export interface ListQueuesRequest__Output { -} diff --git a/generated/fila/v1/ListQueuesResponse.ts b/generated/fila/v1/ListQueuesResponse.ts deleted file mode 100644 index 7f01010..0000000 --- a/generated/fila/v1/ListQueuesResponse.ts +++ /dev/null @@ -1,13 +0,0 @@ -// Original file: proto/fila/v1/admin.proto - -import type { QueueInfo as _fila_v1_QueueInfo, QueueInfo__Output as _fila_v1_QueueInfo__Output } from '../../fila/v1/QueueInfo'; - -export interface ListQueuesResponse { - 'queues'?: (_fila_v1_QueueInfo)[]; - 'clusterNodeCount'?: (number); -} - -export interface ListQueuesResponse__Output { - 'queues': (_fila_v1_QueueInfo__Output)[]; - 'clusterNodeCount': (number); -} diff --git a/generated/fila/v1/Message.ts b/generated/fila/v1/Message.ts deleted file mode 100644 index 16f2b3e..0000000 --- a/generated/fila/v1/Message.ts +++ /dev/null @@ -1,20 +0,0 @@ -// Original file: proto/fila/v1/messages.proto - -import type { MessageMetadata as _fila_v1_MessageMetadata, MessageMetadata__Output as _fila_v1_MessageMetadata__Output } from '../../fila/v1/MessageMetadata'; -import type { MessageTimestamps as _fila_v1_MessageTimestamps, MessageTimestamps__Output as _fila_v1_MessageTimestamps__Output } from '../../fila/v1/MessageTimestamps'; - -export interface Message { - 'id'?: (string); - 'headers'?: ({[key: string]: string}); - 'payload'?: (Buffer | Uint8Array | string); - 'metadata'?: (_fila_v1_MessageMetadata | null); - 'timestamps'?: (_fila_v1_MessageTimestamps | null); -} - -export interface Message__Output { - 'id': (string); - 'headers': ({[key: string]: string}); - 'payload': (Buffer); - 'metadata': (_fila_v1_MessageMetadata__Output | null); - 'timestamps': 
(_fila_v1_MessageTimestamps__Output | null); -} diff --git a/generated/fila/v1/MessageMetadata.ts b/generated/fila/v1/MessageMetadata.ts deleted file mode 100644 index 395c051..0000000 --- a/generated/fila/v1/MessageMetadata.ts +++ /dev/null @@ -1,18 +0,0 @@ -// Original file: proto/fila/v1/messages.proto - - -export interface MessageMetadata { - 'fairnessKey'?: (string); - 'weight'?: (number); - 'throttleKeys'?: (string)[]; - 'attemptCount'?: (number); - 'queueId'?: (string); -} - -export interface MessageMetadata__Output { - 'fairnessKey': (string); - 'weight': (number); - 'throttleKeys': (string)[]; - 'attemptCount': (number); - 'queueId': (string); -} diff --git a/generated/fila/v1/MessageTimestamps.ts b/generated/fila/v1/MessageTimestamps.ts deleted file mode 100644 index 140f491..0000000 --- a/generated/fila/v1/MessageTimestamps.ts +++ /dev/null @@ -1,13 +0,0 @@ -// Original file: proto/fila/v1/messages.proto - -import type { Timestamp as _google_protobuf_Timestamp, Timestamp__Output as _google_protobuf_Timestamp__Output } from '../../google/protobuf/Timestamp'; - -export interface MessageTimestamps { - 'enqueuedAt'?: (_google_protobuf_Timestamp | null); - 'leasedAt'?: (_google_protobuf_Timestamp | null); -} - -export interface MessageTimestamps__Output { - 'enqueuedAt': (_google_protobuf_Timestamp__Output | null); - 'leasedAt': (_google_protobuf_Timestamp__Output | null); -} diff --git a/generated/fila/v1/NackError.ts b/generated/fila/v1/NackError.ts deleted file mode 100644 index 2cc2888..0000000 --- a/generated/fila/v1/NackError.ts +++ /dev/null @@ -1,13 +0,0 @@ -// Original file: proto/fila/v1/service.proto - -import type { NackErrorCode as _fila_v1_NackErrorCode, NackErrorCode__Output as _fila_v1_NackErrorCode__Output } from '../../fila/v1/NackErrorCode'; - -export interface NackError { - 'code'?: (_fila_v1_NackErrorCode); - 'message'?: (string); -} - -export interface NackError__Output { - 'code': (_fila_v1_NackErrorCode__Output); - 'message': (string); 
-} diff --git a/generated/fila/v1/NackErrorCode.ts b/generated/fila/v1/NackErrorCode.ts deleted file mode 100644 index e7738f7..0000000 --- a/generated/fila/v1/NackErrorCode.ts +++ /dev/null @@ -1,20 +0,0 @@ -// Original file: proto/fila/v1/service.proto - -export const NackErrorCode = { - NACK_ERROR_CODE_UNSPECIFIED: 'NACK_ERROR_CODE_UNSPECIFIED', - NACK_ERROR_CODE_MESSAGE_NOT_FOUND: 'NACK_ERROR_CODE_MESSAGE_NOT_FOUND', - NACK_ERROR_CODE_STORAGE: 'NACK_ERROR_CODE_STORAGE', - NACK_ERROR_CODE_PERMISSION_DENIED: 'NACK_ERROR_CODE_PERMISSION_DENIED', -} as const; - -export type NackErrorCode = - | 'NACK_ERROR_CODE_UNSPECIFIED' - | 0 - | 'NACK_ERROR_CODE_MESSAGE_NOT_FOUND' - | 1 - | 'NACK_ERROR_CODE_STORAGE' - | 2 - | 'NACK_ERROR_CODE_PERMISSION_DENIED' - | 3 - -export type NackErrorCode__Output = typeof NackErrorCode[keyof typeof NackErrorCode] diff --git a/generated/fila/v1/NackMessage.ts b/generated/fila/v1/NackMessage.ts deleted file mode 100644 index 2ce0501..0000000 --- a/generated/fila/v1/NackMessage.ts +++ /dev/null @@ -1,14 +0,0 @@ -// Original file: proto/fila/v1/service.proto - - -export interface NackMessage { - 'queue'?: (string); - 'messageId'?: (string); - 'error'?: (string); -} - -export interface NackMessage__Output { - 'queue': (string); - 'messageId': (string); - 'error': (string); -} diff --git a/generated/fila/v1/NackRequest.ts b/generated/fila/v1/NackRequest.ts deleted file mode 100644 index 2f2450d..0000000 --- a/generated/fila/v1/NackRequest.ts +++ /dev/null @@ -1,11 +0,0 @@ -// Original file: proto/fila/v1/service.proto - -import type { NackMessage as _fila_v1_NackMessage, NackMessage__Output as _fila_v1_NackMessage__Output } from '../../fila/v1/NackMessage'; - -export interface NackRequest { - 'messages'?: (_fila_v1_NackMessage)[]; -} - -export interface NackRequest__Output { - 'messages': (_fila_v1_NackMessage__Output)[]; -} diff --git a/generated/fila/v1/NackResponse.ts b/generated/fila/v1/NackResponse.ts deleted file mode 100644 index 
fd00fe1..0000000 --- a/generated/fila/v1/NackResponse.ts +++ /dev/null @@ -1,11 +0,0 @@ -// Original file: proto/fila/v1/service.proto - -import type { NackResult as _fila_v1_NackResult, NackResult__Output as _fila_v1_NackResult__Output } from '../../fila/v1/NackResult'; - -export interface NackResponse { - 'results'?: (_fila_v1_NackResult)[]; -} - -export interface NackResponse__Output { - 'results': (_fila_v1_NackResult__Output)[]; -} diff --git a/generated/fila/v1/NackResult.ts b/generated/fila/v1/NackResult.ts deleted file mode 100644 index 9a205d9..0000000 --- a/generated/fila/v1/NackResult.ts +++ /dev/null @@ -1,16 +0,0 @@ -// Original file: proto/fila/v1/service.proto - -import type { NackSuccess as _fila_v1_NackSuccess, NackSuccess__Output as _fila_v1_NackSuccess__Output } from '../../fila/v1/NackSuccess'; -import type { NackError as _fila_v1_NackError, NackError__Output as _fila_v1_NackError__Output } from '../../fila/v1/NackError'; - -export interface NackResult { - 'success'?: (_fila_v1_NackSuccess | null); - 'error'?: (_fila_v1_NackError | null); - 'result'?: "success"|"error"; -} - -export interface NackResult__Output { - 'success'?: (_fila_v1_NackSuccess__Output | null); - 'error'?: (_fila_v1_NackError__Output | null); - 'result'?: "success"|"error"; -} diff --git a/generated/fila/v1/NackSuccess.ts b/generated/fila/v1/NackSuccess.ts deleted file mode 100644 index cd61ea8..0000000 --- a/generated/fila/v1/NackSuccess.ts +++ /dev/null @@ -1,8 +0,0 @@ -// Original file: proto/fila/v1/service.proto - - -export interface NackSuccess { -} - -export interface NackSuccess__Output { -} diff --git a/generated/fila/v1/PerFairnessKeyStats.ts b/generated/fila/v1/PerFairnessKeyStats.ts deleted file mode 100644 index 931553f..0000000 --- a/generated/fila/v1/PerFairnessKeyStats.ts +++ /dev/null @@ -1,17 +0,0 @@ -// Original file: proto/fila/v1/admin.proto - -import type { Long } from '@grpc/proto-loader'; - -export interface PerFairnessKeyStats { - 'key'?: (string); - 
'pendingCount'?: (number | string | Long); - 'currentDeficit'?: (number | string | Long); - 'weight'?: (number); -} - -export interface PerFairnessKeyStats__Output { - 'key': (string); - 'pendingCount': (string); - 'currentDeficit': (string); - 'weight': (number); -} diff --git a/generated/fila/v1/PerThrottleKeyStats.ts b/generated/fila/v1/PerThrottleKeyStats.ts deleted file mode 100644 index 069b44c..0000000 --- a/generated/fila/v1/PerThrottleKeyStats.ts +++ /dev/null @@ -1,16 +0,0 @@ -// Original file: proto/fila/v1/admin.proto - - -export interface PerThrottleKeyStats { - 'key'?: (string); - 'tokens'?: (number | string); - 'ratePerSecond'?: (number | string); - 'burst'?: (number | string); -} - -export interface PerThrottleKeyStats__Output { - 'key': (string); - 'tokens': (number); - 'ratePerSecond': (number); - 'burst': (number); -} diff --git a/generated/fila/v1/QueueConfig.ts b/generated/fila/v1/QueueConfig.ts deleted file mode 100644 index 9c0517e..0000000 --- a/generated/fila/v1/QueueConfig.ts +++ /dev/null @@ -1,15 +0,0 @@ -// Original file: proto/fila/v1/admin.proto - -import type { Long } from '@grpc/proto-loader'; - -export interface QueueConfig { - 'onEnqueueScript'?: (string); - 'onFailureScript'?: (string); - 'visibilityTimeoutMs'?: (number | string | Long); -} - -export interface QueueConfig__Output { - 'onEnqueueScript': (string); - 'onFailureScript': (string); - 'visibilityTimeoutMs': (string); -} diff --git a/generated/fila/v1/QueueInfo.ts b/generated/fila/v1/QueueInfo.ts deleted file mode 100644 index 09f3677..0000000 --- a/generated/fila/v1/QueueInfo.ts +++ /dev/null @@ -1,19 +0,0 @@ -// Original file: proto/fila/v1/admin.proto - -import type { Long } from '@grpc/proto-loader'; - -export interface QueueInfo { - 'name'?: (string); - 'depth'?: (number | string | Long); - 'inFlight'?: (number | string | Long); - 'activeConsumers'?: (number); - 'leaderNodeId'?: (number | string | Long); -} - -export interface QueueInfo__Output { - 'name': (string); 
- 'depth': (string); - 'inFlight': (string); - 'activeConsumers': (number); - 'leaderNodeId': (string); -} diff --git a/generated/fila/v1/RedriveRequest.ts b/generated/fila/v1/RedriveRequest.ts deleted file mode 100644 index 10e7839..0000000 --- a/generated/fila/v1/RedriveRequest.ts +++ /dev/null @@ -1,13 +0,0 @@ -// Original file: proto/fila/v1/admin.proto - -import type { Long } from '@grpc/proto-loader'; - -export interface RedriveRequest { - 'dlqQueue'?: (string); - 'count'?: (number | string | Long); -} - -export interface RedriveRequest__Output { - 'dlqQueue': (string); - 'count': (string); -} diff --git a/generated/fila/v1/RedriveResponse.ts b/generated/fila/v1/RedriveResponse.ts deleted file mode 100644 index cd4170a..0000000 --- a/generated/fila/v1/RedriveResponse.ts +++ /dev/null @@ -1,11 +0,0 @@ -// Original file: proto/fila/v1/admin.proto - -import type { Long } from '@grpc/proto-loader'; - -export interface RedriveResponse { - 'redriven'?: (number | string | Long); -} - -export interface RedriveResponse__Output { - 'redriven': (string); -} diff --git a/generated/fila/v1/RevokeApiKeyRequest.ts b/generated/fila/v1/RevokeApiKeyRequest.ts deleted file mode 100644 index e13223f..0000000 --- a/generated/fila/v1/RevokeApiKeyRequest.ts +++ /dev/null @@ -1,10 +0,0 @@ -// Original file: proto/fila/v1/admin.proto - - -export interface RevokeApiKeyRequest { - 'keyId'?: (string); -} - -export interface RevokeApiKeyRequest__Output { - 'keyId': (string); -} diff --git a/generated/fila/v1/RevokeApiKeyResponse.ts b/generated/fila/v1/RevokeApiKeyResponse.ts deleted file mode 100644 index ab63276..0000000 --- a/generated/fila/v1/RevokeApiKeyResponse.ts +++ /dev/null @@ -1,8 +0,0 @@ -// Original file: proto/fila/v1/admin.proto - - -export interface RevokeApiKeyResponse { -} - -export interface RevokeApiKeyResponse__Output { -} diff --git a/generated/fila/v1/SetAclRequest.ts b/generated/fila/v1/SetAclRequest.ts deleted file mode 100644 index 05b9c58..0000000 --- 
a/generated/fila/v1/SetAclRequest.ts +++ /dev/null @@ -1,13 +0,0 @@ -// Original file: proto/fila/v1/admin.proto - -import type { AclPermission as _fila_v1_AclPermission, AclPermission__Output as _fila_v1_AclPermission__Output } from '../../fila/v1/AclPermission'; - -export interface SetAclRequest { - 'keyId'?: (string); - 'permissions'?: (_fila_v1_AclPermission)[]; -} - -export interface SetAclRequest__Output { - 'keyId': (string); - 'permissions': (_fila_v1_AclPermission__Output)[]; -} diff --git a/generated/fila/v1/SetAclResponse.ts b/generated/fila/v1/SetAclResponse.ts deleted file mode 100644 index 73fa5cd..0000000 --- a/generated/fila/v1/SetAclResponse.ts +++ /dev/null @@ -1,8 +0,0 @@ -// Original file: proto/fila/v1/admin.proto - - -export interface SetAclResponse { -} - -export interface SetAclResponse__Output { -} diff --git a/generated/fila/v1/SetConfigRequest.ts b/generated/fila/v1/SetConfigRequest.ts deleted file mode 100644 index ce8ad17..0000000 --- a/generated/fila/v1/SetConfigRequest.ts +++ /dev/null @@ -1,12 +0,0 @@ -// Original file: proto/fila/v1/admin.proto - - -export interface SetConfigRequest { - 'key'?: (string); - 'value'?: (string); -} - -export interface SetConfigRequest__Output { - 'key': (string); - 'value': (string); -} diff --git a/generated/fila/v1/SetConfigResponse.ts b/generated/fila/v1/SetConfigResponse.ts deleted file mode 100644 index ec82c90..0000000 --- a/generated/fila/v1/SetConfigResponse.ts +++ /dev/null @@ -1,8 +0,0 @@ -// Original file: proto/fila/v1/admin.proto - - -export interface SetConfigResponse { -} - -export interface SetConfigResponse__Output { -} diff --git a/generated/fila/v1/StreamEnqueueRequest.ts b/generated/fila/v1/StreamEnqueueRequest.ts deleted file mode 100644 index 03d0b32..0000000 --- a/generated/fila/v1/StreamEnqueueRequest.ts +++ /dev/null @@ -1,14 +0,0 @@ -// Original file: proto/fila/v1/service.proto - -import type { EnqueueMessage as _fila_v1_EnqueueMessage, EnqueueMessage__Output as 
_fila_v1_EnqueueMessage__Output } from '../../fila/v1/EnqueueMessage'; -import type { Long } from '@grpc/proto-loader'; - -export interface StreamEnqueueRequest { - 'messages'?: (_fila_v1_EnqueueMessage)[]; - 'sequenceNumber'?: (number | string | Long); -} - -export interface StreamEnqueueRequest__Output { - 'messages': (_fila_v1_EnqueueMessage__Output)[]; - 'sequenceNumber': (string); -} diff --git a/generated/fila/v1/StreamEnqueueResponse.ts b/generated/fila/v1/StreamEnqueueResponse.ts deleted file mode 100644 index 56c5586..0000000 --- a/generated/fila/v1/StreamEnqueueResponse.ts +++ /dev/null @@ -1,14 +0,0 @@ -// Original file: proto/fila/v1/service.proto - -import type { EnqueueResult as _fila_v1_EnqueueResult, EnqueueResult__Output as _fila_v1_EnqueueResult__Output } from '../../fila/v1/EnqueueResult'; -import type { Long } from '@grpc/proto-loader'; - -export interface StreamEnqueueResponse { - 'sequenceNumber'?: (number | string | Long); - 'results'?: (_fila_v1_EnqueueResult)[]; -} - -export interface StreamEnqueueResponse__Output { - 'sequenceNumber': (string); - 'results': (_fila_v1_EnqueueResult__Output)[]; -} diff --git a/generated/google/protobuf/Timestamp.ts b/generated/google/protobuf/Timestamp.ts deleted file mode 100644 index ceaa32b..0000000 --- a/generated/google/protobuf/Timestamp.ts +++ /dev/null @@ -1,13 +0,0 @@ -// Original file: null - -import type { Long } from '@grpc/proto-loader'; - -export interface Timestamp { - 'seconds'?: (number | string | Long); - 'nanos'?: (number); -} - -export interface Timestamp__Output { - 'seconds': (string); - 'nanos': (number); -} diff --git a/generated/messages.ts b/generated/messages.ts deleted file mode 100644 index 52dc51d..0000000 --- a/generated/messages.ts +++ /dev/null @@ -1,23 +0,0 @@ -import type * as grpc from '@grpc/grpc-js'; -import type { MessageTypeDefinition } from '@grpc/proto-loader'; - - -type SubtypeConstructor any, Subtype> = { - new(...args: ConstructorParameters): Subtype; -}; - 
-export interface ProtoGrpcType { - fila: { - v1: { - Message: MessageTypeDefinition - MessageMetadata: MessageTypeDefinition - MessageTimestamps: MessageTypeDefinition - } - } - google: { - protobuf: { - Timestamp: MessageTypeDefinition - } - } -} - diff --git a/generated/service.ts b/generated/service.ts deleted file mode 100644 index eb61748..0000000 --- a/generated/service.ts +++ /dev/null @@ -1,49 +0,0 @@ -import type * as grpc from '@grpc/grpc-js'; -import type { EnumTypeDefinition, MessageTypeDefinition } from '@grpc/proto-loader'; - -import type { FilaServiceClient as _fila_v1_FilaServiceClient, FilaServiceDefinition as _fila_v1_FilaServiceDefinition } from './fila/v1/FilaService'; - -type SubtypeConstructor any, Subtype> = { - new(...args: ConstructorParameters): Subtype; -}; - -export interface ProtoGrpcType { - fila: { - v1: { - AckError: MessageTypeDefinition - AckErrorCode: EnumTypeDefinition - AckMessage: MessageTypeDefinition - AckRequest: MessageTypeDefinition - AckResponse: MessageTypeDefinition - AckResult: MessageTypeDefinition - AckSuccess: MessageTypeDefinition - ConsumeRequest: MessageTypeDefinition - ConsumeResponse: MessageTypeDefinition - EnqueueError: MessageTypeDefinition - EnqueueErrorCode: EnumTypeDefinition - EnqueueMessage: MessageTypeDefinition - EnqueueRequest: MessageTypeDefinition - EnqueueResponse: MessageTypeDefinition - EnqueueResult: MessageTypeDefinition - FilaService: SubtypeConstructor & { service: _fila_v1_FilaServiceDefinition } - Message: MessageTypeDefinition - MessageMetadata: MessageTypeDefinition - MessageTimestamps: MessageTypeDefinition - NackError: MessageTypeDefinition - NackErrorCode: EnumTypeDefinition - NackMessage: MessageTypeDefinition - NackRequest: MessageTypeDefinition - NackResponse: MessageTypeDefinition - NackResult: MessageTypeDefinition - NackSuccess: MessageTypeDefinition - StreamEnqueueRequest: MessageTypeDefinition - StreamEnqueueResponse: MessageTypeDefinition - } - } - google: { - protobuf: { - 
Timestamp: MessageTypeDefinition - } - } -} - diff --git a/package-lock.json b/package-lock.json index 0b953af..9eb883d 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,17 +1,13 @@ { "name": "fila-client", - "version": "0.2.0", + "version": "0.3.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "fila-client", - "version": "0.2.0", + "version": "0.3.0", "license": "AGPL-3.0-or-later", - "dependencies": { - "@grpc/grpc-js": "^1.12.0", - "@grpc/proto-loader": "^0.7.0" - }, "devDependencies": { "@eslint/js": "^9.0.0", "@types/node": "^22.0.0", @@ -607,55 +603,6 @@ "node": "^18.18.0 || ^20.9.0 || >=21.1.0" } }, - "node_modules/@grpc/grpc-js": { - "version": "1.14.3", - "resolved": "https://registry.npmjs.org/@grpc/grpc-js/-/grpc-js-1.14.3.tgz", - "integrity": "sha512-Iq8QQQ/7X3Sac15oB6p0FmUg/klxQvXLeileoqrTRGJYLV+/9tubbr9ipz0GKHjmXVsgFPo/+W+2cA8eNcR+XA==", - "license": "Apache-2.0", - "dependencies": { - "@grpc/proto-loader": "^0.8.0", - "@js-sdsl/ordered-map": "^4.4.2" - }, - "engines": { - "node": ">=12.10.0" - } - }, - "node_modules/@grpc/grpc-js/node_modules/@grpc/proto-loader": { - "version": "0.8.0", - "resolved": "https://registry.npmjs.org/@grpc/proto-loader/-/proto-loader-0.8.0.tgz", - "integrity": "sha512-rc1hOQtjIWGxcxpb9aHAfLpIctjEnsDehj0DAiVfBlmT84uvR0uUtN2hEi/ecvWVjXUGf5qPF4qEgiLOx1YIMQ==", - "license": "Apache-2.0", - "dependencies": { - "lodash.camelcase": "^4.3.0", - "long": "^5.0.0", - "protobufjs": "^7.5.3", - "yargs": "^17.7.2" - }, - "bin": { - "proto-loader-gen-types": "build/bin/proto-loader-gen-types.js" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/@grpc/proto-loader": { - "version": "0.7.15", - "resolved": "https://registry.npmjs.org/@grpc/proto-loader/-/proto-loader-0.7.15.tgz", - "integrity": "sha512-tMXdRCfYVixjuFK+Hk0Q1s38gV9zDiDJfWL3h1rv4Qc39oILCu1TRTDt7+fGUI8K4G1Fj125Hx/ru3azECWTyQ==", - "license": "Apache-2.0", - "dependencies": { - "lodash.camelcase": "^4.3.0", - "long": "^5.0.0", - 
"protobufjs": "^7.2.5", - "yargs": "^17.7.2" - }, - "bin": { - "proto-loader-gen-types": "build/bin/proto-loader-gen-types.js" - }, - "engines": { - "node": ">=6" - } - }, "node_modules/@humanfs/core": { "version": "0.19.1", "resolved": "https://registry.npmjs.org/@humanfs/core/-/core-0.19.1.tgz", @@ -715,80 +662,6 @@ "dev": true, "license": "MIT" }, - "node_modules/@js-sdsl/ordered-map": { - "version": "4.4.2", - "resolved": "https://registry.npmjs.org/@js-sdsl/ordered-map/-/ordered-map-4.4.2.tgz", - "integrity": "sha512-iUKgm52T8HOE/makSxjqoWhe95ZJA1/G1sYsGev2JDKUSS14KAgg1LHb+Ba+IPow0xflbnSkOsZcO08C7w1gYw==", - "license": "MIT", - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/js-sdsl" - } - }, - "node_modules/@protobufjs/aspromise": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@protobufjs/aspromise/-/aspromise-1.1.2.tgz", - "integrity": "sha512-j+gKExEuLmKwvz3OgROXtrJ2UG2x8Ch2YZUxahh+s1F2HZ+wAceUNLkvy6zKCPVRkU++ZWQrdxsUeQXmcg4uoQ==", - "license": "BSD-3-Clause" - }, - "node_modules/@protobufjs/base64": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@protobufjs/base64/-/base64-1.1.2.tgz", - "integrity": "sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg==", - "license": "BSD-3-Clause" - }, - "node_modules/@protobufjs/codegen": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/@protobufjs/codegen/-/codegen-2.0.4.tgz", - "integrity": "sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg==", - "license": "BSD-3-Clause" - }, - "node_modules/@protobufjs/eventemitter": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@protobufjs/eventemitter/-/eventemitter-1.1.0.tgz", - "integrity": "sha512-j9ednRT81vYJ9OfVuXG6ERSTdEL1xVsNgqpkxMsbIabzSo3goCjDIveeGv5d03om39ML71RdmrGNjG5SReBP/Q==", - "license": "BSD-3-Clause" - }, - "node_modules/@protobufjs/fetch": { - "version": "1.1.0", - 
"resolved": "https://registry.npmjs.org/@protobufjs/fetch/-/fetch-1.1.0.tgz", - "integrity": "sha512-lljVXpqXebpsijW71PZaCYeIcE5on1w5DlQy5WH6GLbFryLUrBD4932W/E2BSpfRJWseIL4v/KPgBFxDOIdKpQ==", - "license": "BSD-3-Clause", - "dependencies": { - "@protobufjs/aspromise": "^1.1.1", - "@protobufjs/inquire": "^1.1.0" - } - }, - "node_modules/@protobufjs/float": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/@protobufjs/float/-/float-1.0.2.tgz", - "integrity": "sha512-Ddb+kVXlXst9d+R9PfTIxh1EdNkgoRe5tOX6t01f1lYWOvJnSPDBlG241QLzcyPdoNTsblLUdujGSE4RzrTZGQ==", - "license": "BSD-3-Clause" - }, - "node_modules/@protobufjs/inquire": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@protobufjs/inquire/-/inquire-1.1.0.tgz", - "integrity": "sha512-kdSefcPdruJiFMVSbn801t4vFK7KB/5gd2fYvrxhuJYg8ILrmn9SKSX2tZdV6V+ksulWqS7aXjBcRXl3wHoD9Q==", - "license": "BSD-3-Clause" - }, - "node_modules/@protobufjs/path": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@protobufjs/path/-/path-1.1.2.tgz", - "integrity": "sha512-6JOcJ5Tm08dOHAbdR3GrvP+yUUfkjG5ePsHYczMFLq3ZmMkAD98cDgcT2iA1lJ9NVwFd4tH/iSSoe44YWkltEA==", - "license": "BSD-3-Clause" - }, - "node_modules/@protobufjs/pool": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@protobufjs/pool/-/pool-1.1.0.tgz", - "integrity": "sha512-0kELaGSIDBKvcgS4zkjz1PeddatrjYcmMWOlAuAPwAeccUrPHdUqo/J6LiymHHEiJT5NrF1UVwxY14f+fy4WQw==", - "license": "BSD-3-Clause" - }, - "node_modules/@protobufjs/utf8": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@protobufjs/utf8/-/utf8-1.1.0.tgz", - "integrity": "sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw==", - "license": "BSD-3-Clause" - }, "node_modules/@rollup/rollup-android-arm-eabi": { "version": "4.57.1", "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.57.1.tgz", @@ -1175,7 +1048,9 @@ "version": "22.19.11", "resolved": 
"https://registry.npmjs.org/@types/node/-/node-22.19.11.tgz", "integrity": "sha512-BH7YwL6rA93ReqeQS1c4bsPpcfOmJasG+Fkr6Y59q83f9M1WcBRHR2vM+P9eOisYRcN3ujQoiZY8uk5W+1WL8w==", + "dev": true, "license": "MIT", + "peer": true, "dependencies": { "undici-types": "~6.21.0" } @@ -1606,19 +1481,11 @@ "url": "https://github.com/sponsors/epoberezkin" } }, - "node_modules/ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, "node_modules/ansi-styles": { "version": "4.3.0", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, "license": "MIT", "dependencies": { "color-convert": "^2.0.1" @@ -1729,24 +1596,11 @@ "node": ">= 16" } }, - "node_modules/cliui": { - "version": "8.0.1", - "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", - "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", - "license": "ISC", - "dependencies": { - "string-width": "^4.2.0", - "strip-ansi": "^6.0.1", - "wrap-ansi": "^7.0.0" - }, - "engines": { - "node": ">=12" - } - }, "node_modules/color-convert": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, "license": "MIT", "dependencies": { "color-name": "~1.1.4" @@ -1759,6 +1613,7 @@ "version": "1.1.4", "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true, "license": "MIT" }, 
"node_modules/concat-map": { @@ -1818,12 +1673,6 @@ "dev": true, "license": "MIT" }, - "node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "license": "MIT" - }, "node_modules/es-module-lexer": { "version": "1.7.0", "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.7.0.tgz", @@ -1873,15 +1722,6 @@ "@esbuild/win32-x64": "0.27.3" } }, - "node_modules/escalade": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", - "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, "node_modules/escape-string-regexp": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", @@ -2175,15 +2015,6 @@ "node": "^8.16.0 || ^10.6.0 || >=11.0.0" } }, - "node_modules/get-caller-file": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", - "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", - "license": "ISC", - "engines": { - "node": "6.* || 8.* || >= 10.*" - } - }, "node_modules/glob-parent": { "version": "6.0.2", "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", @@ -2267,15 +2098,6 @@ "node": ">=0.10.0" } }, - "node_modules/is-fullwidth-code-point": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", - "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, "node_modules/is-glob": { "version": "4.0.3", "resolved": 
"https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", @@ -2377,12 +2199,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/lodash.camelcase": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/lodash.camelcase/-/lodash.camelcase-4.3.0.tgz", - "integrity": "sha512-TwuEnCnxbc3rAvhf/LbG7tJUDzhqXyFnv3dtzLOPgCG/hODL7WFnsbwktkD7yUV0RrreP/l1PALq/YSg6VvjlA==", - "license": "MIT" - }, "node_modules/lodash.merge": { "version": "4.6.2", "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", @@ -2390,12 +2206,6 @@ "dev": true, "license": "MIT" }, - "node_modules/long": { - "version": "5.3.2", - "resolved": "https://registry.npmjs.org/long/-/long-5.3.2.tgz", - "integrity": "sha512-mNAgZ1GmyNhD7AuqnTG3/VQ26o760+ZYBPKjPvugO8+nLbYfX6TVpJPseBvopbdY+qpZ/lKUnmEc1LeZYS3QAA==", - "license": "Apache-2.0" - }, "node_modules/loupe": { "version": "3.2.1", "resolved": "https://registry.npmjs.org/loupe/-/loupe-3.2.1.tgz", @@ -2619,30 +2429,6 @@ "node": ">= 0.8.0" } }, - "node_modules/protobufjs": { - "version": "7.5.4", - "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-7.5.4.tgz", - "integrity": "sha512-CvexbZtbov6jW2eXAvLukXjXUW1TzFaivC46BpWc/3BpcCysb5Vffu+B3XHMm8lVEuy2Mm4XGex8hBSg1yapPg==", - "hasInstallScript": true, - "license": "BSD-3-Clause", - "dependencies": { - "@protobufjs/aspromise": "^1.1.2", - "@protobufjs/base64": "^1.1.2", - "@protobufjs/codegen": "^2.0.4", - "@protobufjs/eventemitter": "^1.1.0", - "@protobufjs/fetch": "^1.1.0", - "@protobufjs/float": "^1.0.2", - "@protobufjs/inquire": "^1.1.0", - "@protobufjs/path": "^1.1.2", - "@protobufjs/pool": "^1.1.0", - "@protobufjs/utf8": "^1.1.0", - "@types/node": ">=13.7.0", - "long": "^5.0.0" - }, - "engines": { - "node": ">=12.0.0" - } - }, "node_modules/punycode": { "version": "2.3.1", "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", @@ -2653,15 +2439,6 @@ "node": ">=6" } }, - "node_modules/require-directory": { - "version": 
"2.1.1", - "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", - "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, "node_modules/resolve-from": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", @@ -2784,32 +2561,6 @@ "dev": true, "license": "MIT" }, - "node_modules/string-width": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "license": "MIT", - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/strip-ansi": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "license": "MIT", - "dependencies": { - "ansi-regex": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, "node_modules/strip-json-comments": { "version": "3.1.1", "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", @@ -2979,6 +2730,7 @@ "version": "6.21.0", "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz", "integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==", + "dev": true, "license": "MIT" }, "node_modules/uri-js": { @@ -3206,59 +2958,6 @@ "node": ">=0.10.0" } }, - "node_modules/wrap-ansi": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", - "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", - "license": 
"MIT", - "dependencies": { - "ansi-styles": "^4.0.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/wrap-ansi?sponsor=1" - } - }, - "node_modules/y18n": { - "version": "5.0.8", - "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", - "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", - "license": "ISC", - "engines": { - "node": ">=10" - } - }, - "node_modules/yargs": { - "version": "17.7.2", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", - "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", - "license": "MIT", - "dependencies": { - "cliui": "^8.0.1", - "escalade": "^3.1.1", - "get-caller-file": "^2.0.5", - "require-directory": "^2.1.1", - "string-width": "^4.2.3", - "y18n": "^5.0.5", - "yargs-parser": "^21.1.1" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/yargs-parser": { - "version": "21.1.1", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", - "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", - "license": "ISC", - "engines": { - "node": ">=12" - } - }, "node_modules/yocto-queue": { "version": "0.1.0", "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", diff --git a/package.json b/package.json index abf30ec..b869f69 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "fila-client", - "version": "0.2.0", + "version": "0.3.0", "description": "JavaScript/TypeScript client SDK for the Fila message broker", "repository": { "type": "git", @@ -9,21 +9,16 @@ "main": "dist/src/index.js", "types": "dist/src/index.d.ts", "files": [ - "dist/", - "proto/" + "dist/" ], "scripts": { "build": "tsc", "lint": "eslint src/ test/", "typecheck": "tsc --noEmit", - "test": "vitest 
run", - "generate": "proto-loader-gen-types --longs=String --enums=String --defaults --oneofs --grpcLib=@grpc/grpc-js --outDir=generated/ proto/fila/v1/messages.proto proto/fila/v1/service.proto proto/fila/v1/admin.proto" + "test": "vitest run" }, "license": "AGPL-3.0-or-later", - "dependencies": { - "@grpc/grpc-js": "^1.12.0", - "@grpc/proto-loader": "^0.7.0" - }, + "dependencies": {}, "devDependencies": { "@types/node": "^22.0.0", "eslint": "^9.0.0", diff --git a/proto/fila/v1/admin.proto b/proto/fila/v1/admin.proto deleted file mode 100644 index 886e58d..0000000 --- a/proto/fila/v1/admin.proto +++ /dev/null @@ -1,197 +0,0 @@ -syntax = "proto3"; -package fila.v1; - -// Admin RPCs for operators and the CLI. -service FilaAdmin { - rpc CreateQueue(CreateQueueRequest) returns (CreateQueueResponse); - rpc DeleteQueue(DeleteQueueRequest) returns (DeleteQueueResponse); - rpc SetConfig(SetConfigRequest) returns (SetConfigResponse); - rpc GetConfig(GetConfigRequest) returns (GetConfigResponse); - rpc ListConfig(ListConfigRequest) returns (ListConfigResponse); - rpc GetStats(GetStatsRequest) returns (GetStatsResponse); - rpc Redrive(RedriveRequest) returns (RedriveResponse); - rpc ListQueues(ListQueuesRequest) returns (ListQueuesResponse); - - // API key management. CreateApiKey bypasses auth (bootstrap); others require a valid key. - rpc CreateApiKey(CreateApiKeyRequest) returns (CreateApiKeyResponse); - rpc RevokeApiKey(RevokeApiKeyRequest) returns (RevokeApiKeyResponse); - rpc ListApiKeys(ListApiKeysRequest) returns (ListApiKeysResponse); - - // Per-key ACL management. 
- rpc SetAcl(SetAclRequest) returns (SetAclResponse); - rpc GetAcl(GetAclRequest) returns (GetAclResponse); -} - -message CreateQueueRequest { - string name = 1; - QueueConfig config = 2; -} - -message QueueConfig { - string on_enqueue_script = 1; - string on_failure_script = 2; - uint64 visibility_timeout_ms = 3; -} - -message CreateQueueResponse { - string queue_id = 1; -} - -message DeleteQueueRequest { - string queue = 1; -} - -message DeleteQueueResponse {} - -message SetConfigRequest { - string key = 1; - string value = 2; -} - -message SetConfigResponse {} - -message GetConfigRequest { - string key = 1; -} - -message GetConfigResponse { - string value = 1; -} - -message ConfigEntry { - string key = 1; - string value = 2; -} - -message ListConfigRequest { - string prefix = 1; -} - -message ListConfigResponse { - repeated ConfigEntry entries = 1; - uint32 total_count = 2; -} - -message GetStatsRequest { - string queue = 1; -} - -message PerFairnessKeyStats { - string key = 1; - uint64 pending_count = 2; - int64 current_deficit = 3; - uint32 weight = 4; -} - -message PerThrottleKeyStats { - string key = 1; - double tokens = 2; - double rate_per_second = 3; - double burst = 4; -} - -message GetStatsResponse { - uint64 depth = 1; - uint64 in_flight = 2; - uint64 active_fairness_keys = 3; - uint32 active_consumers = 4; - uint32 quantum = 5; - repeated PerFairnessKeyStats per_key_stats = 6; - repeated PerThrottleKeyStats per_throttle_stats = 7; - // Cluster fields (0 when not in cluster mode). 
- uint64 leader_node_id = 8; - uint32 replication_count = 9; -} - -message RedriveRequest { - string dlq_queue = 1; - uint64 count = 2; -} - -message RedriveResponse { - uint64 redriven = 1; -} - -message ListQueuesRequest {} - -message QueueInfo { - string name = 1; - uint64 depth = 2; - uint64 in_flight = 3; - uint32 active_consumers = 4; - uint64 leader_node_id = 5; -} - -message ListQueuesResponse { - repeated QueueInfo queues = 1; - uint32 cluster_node_count = 2; -} - -// --- API Key Management --- - -message CreateApiKeyRequest { - /// Human-readable label for the key. - string name = 1; - /// Optional Unix timestamp (milliseconds) after which the key expires. - /// 0 means no expiration. - uint64 expires_at_ms = 2; - /// When true, the key bypasses all ACL checks (superadmin). - bool is_superadmin = 3; -} - -message CreateApiKeyResponse { - /// Opaque key ID for management operations (revoke, list, set-acl). - string key_id = 1; - /// Plaintext API key. Returned once — store it securely. - string key = 2; - /// Whether this key has superadmin privileges. - bool is_superadmin = 3; -} - -message RevokeApiKeyRequest { - string key_id = 1; -} - -message RevokeApiKeyResponse {} - -message ListApiKeysRequest {} - -message ApiKeyInfo { - string key_id = 1; - string name = 2; - uint64 created_at_ms = 3; - /// 0 means no expiration. - uint64 expires_at_ms = 4; - bool is_superadmin = 5; -} - -message ListApiKeysResponse { - repeated ApiKeyInfo keys = 1; -} - -// --- ACL Management --- - -/// A single permission grant: kind (produce/consume/admin) + queue pattern. -message AclPermission { - /// One of: "produce", "consume", "admin". - string kind = 1; - /// Queue name or wildcard ("*" or "orders.*"). 
- string pattern = 2; -} - -message SetAclRequest { - string key_id = 1; - repeated AclPermission permissions = 2; -} - -message SetAclResponse {} - -message GetAclRequest { - string key_id = 1; -} - -message GetAclResponse { - string key_id = 1; - repeated AclPermission permissions = 2; - bool is_superadmin = 3; -} diff --git a/proto/fila/v1/messages.proto b/proto/fila/v1/messages.proto deleted file mode 100644 index a0709cf..0000000 --- a/proto/fila/v1/messages.proto +++ /dev/null @@ -1,28 +0,0 @@ -syntax = "proto3"; -package fila.v1; - -import "google/protobuf/timestamp.proto"; - -// Core message envelope persisted in the broker. -message Message { - string id = 1; - map headers = 2; - bytes payload = 3; - MessageMetadata metadata = 4; - MessageTimestamps timestamps = 5; -} - -// Broker-assigned scheduling metadata. -message MessageMetadata { - string fairness_key = 1; - uint32 weight = 2; - repeated string throttle_keys = 3; - uint32 attempt_count = 4; - string queue_id = 5; -} - -// Lifecycle timestamps attached to every message. -message MessageTimestamps { - google.protobuf.Timestamp enqueued_at = 1; - google.protobuf.Timestamp leased_at = 2; -} diff --git a/proto/fila/v1/service.proto b/proto/fila/v1/service.proto deleted file mode 100644 index 7d1db79..0000000 --- a/proto/fila/v1/service.proto +++ /dev/null @@ -1,142 +0,0 @@ -syntax = "proto3"; -package fila.v1; - -import "fila/v1/messages.proto"; - -// Hot-path RPCs for producers and consumers. -service FilaService { - rpc Enqueue(EnqueueRequest) returns (EnqueueResponse); - rpc StreamEnqueue(stream StreamEnqueueRequest) returns (stream StreamEnqueueResponse); - rpc Consume(ConsumeRequest) returns (stream ConsumeResponse); - rpc Ack(AckRequest) returns (AckResponse); - rpc Nack(NackRequest) returns (NackResponse); -} - -// Individual message to enqueue. -message EnqueueMessage { - string queue = 1; - map headers = 2; - bytes payload = 3; -} - -// Enqueue one or more messages. 
-message EnqueueRequest { - repeated EnqueueMessage messages = 1; -} - -// Per-message enqueue result. -message EnqueueResult { - oneof result { - string message_id = 1; - EnqueueError error = 2; - } -} - -// Typed enqueue error with structured error code. -message EnqueueError { - EnqueueErrorCode code = 1; - string message = 2; -} - -enum EnqueueErrorCode { - ENQUEUE_ERROR_CODE_UNSPECIFIED = 0; - ENQUEUE_ERROR_CODE_QUEUE_NOT_FOUND = 1; - ENQUEUE_ERROR_CODE_STORAGE = 2; - ENQUEUE_ERROR_CODE_LUA = 3; - ENQUEUE_ERROR_CODE_PERMISSION_DENIED = 4; -} - -// One result per input message. -message EnqueueResponse { - repeated EnqueueResult results = 1; -} - -message ConsumeRequest { - string queue = 1; -} - -message ConsumeResponse { - repeated Message messages = 1; -} - -// Individual ack item. -message AckMessage { - string queue = 1; - string message_id = 2; -} - -message AckRequest { - repeated AckMessage messages = 1; -} - -message AckResult { - oneof result { - AckSuccess success = 1; - AckError error = 2; - } -} - -message AckSuccess {} - -message AckError { - AckErrorCode code = 1; - string message = 2; -} - -enum AckErrorCode { - ACK_ERROR_CODE_UNSPECIFIED = 0; - ACK_ERROR_CODE_MESSAGE_NOT_FOUND = 1; - ACK_ERROR_CODE_STORAGE = 2; - ACK_ERROR_CODE_PERMISSION_DENIED = 3; -} - -message AckResponse { - repeated AckResult results = 1; -} - -// Individual nack item. 
-message NackMessage { - string queue = 1; - string message_id = 2; - string error = 3; -} - -message NackRequest { - repeated NackMessage messages = 1; -} - -message NackResult { - oneof result { - NackSuccess success = 1; - NackError error = 2; - } -} - -message NackSuccess {} - -message NackError { - NackErrorCode code = 1; - string message = 2; -} - -enum NackErrorCode { - NACK_ERROR_CODE_UNSPECIFIED = 0; - NACK_ERROR_CODE_MESSAGE_NOT_FOUND = 1; - NACK_ERROR_CODE_STORAGE = 2; - NACK_ERROR_CODE_PERMISSION_DENIED = 3; -} - -message NackResponse { - repeated NackResult results = 1; -} - -// Stream enqueue — per-write batch with sequence tracking. -message StreamEnqueueRequest { - repeated EnqueueMessage messages = 1; - uint64 sequence_number = 2; -} - -message StreamEnqueueResponse { - uint64 sequence_number = 1; - repeated EnqueueResult results = 2; -} diff --git a/src/batcher.ts b/src/batcher.ts index 214478d..b9e590e 100644 --- a/src/batcher.ts +++ b/src/batcher.ts @@ -1,9 +1,14 @@ -import * as grpc from "@grpc/grpc-js"; - -import { QueueNotFoundError, RPCError } from "./errors"; +import { + Encoder, + Decoder, + OP_ENQUEUE, + OP_ENQUEUE_RESULT, + OP_ERROR, + ERR_OK, +} from "./fibp"; +import { ProtocolError, mapErrorCode, mapItemErrorCode } from "./errors"; import type { EnqueueMessage } from "./types"; -import type { FilaServiceClient } from "../generated/fila/v1/FilaService"; -import type { EnqueueResponse__Output } from "../generated/fila/v1/EnqueueResponse"; +import type { Connection } from "./connection"; /** Controls how the SDK batches enqueue() calls. */ export type BatchMode = @@ -18,32 +23,13 @@ interface BatchItem { reject: (err: Error) => void; } -/** - * Map a per-message EnqueueResult error to an SDK error. - * The unified proto uses typed EnqueueError with an error code. 
- */ -function mapResultError(code: string, message: string): Error { - if (code === "ENQUEUE_ERROR_CODE_QUEUE_NOT_FOUND") { - return new QueueNotFoundError(`enqueue: ${message}`); - } - return new RPCError(grpc.status.INTERNAL, message); -} - -function mapTransportError(err: grpc.ServiceError): Error { - if (err.code === grpc.status.NOT_FOUND) { - return new QueueNotFoundError(`enqueue: ${err.details}`); - } - return new RPCError(err.code, err.details); -} - /** * Background batcher that collects enqueue() calls and flushes them - * via the unified Enqueue RPC (which accepts repeated messages). + * via the FIBP Enqueue opcode (batch-native). * Supports auto (opportunistic) and linger (timer-based) modes. */ export class Batcher { - private readonly grpcClient: FilaServiceClient; - private readonly callMetadata: () => grpc.Metadata; + private readonly conn: Connection; private readonly batchMode: BatchMode; private readonly maxBatchSize: number; @@ -54,13 +40,8 @@ export class Batcher { private lingerTimer: ReturnType | null = null; private inFlightCount = 0; - constructor( - grpcClient: FilaServiceClient, - callMetadata: () => grpc.Metadata, - batchMode: BatchMode - ) { - this.grpcClient = grpcClient; - this.callMetadata = callMetadata; + constructor(conn: Connection, batchMode: BatchMode) { + this.conn = conn; this.batchMode = batchMode; if (batchMode.mode === "auto") { @@ -72,15 +53,10 @@ export class Batcher { } } - /** - * Submit a message for batched enqueue. Returns a promise that resolves - * with the message ID when the batch containing this message is flushed. - */ + /** Submit a message for batched enqueue. 
*/ submit(message: EnqueueMessage): Promise { if (this.closed) { - return Promise.reject( - new RPCError(grpc.status.UNAVAILABLE, "batcher is closed") - ); + return Promise.reject(new ProtocolError(0xff, "batcher is closed")); } return new Promise((resolve, reject) => { @@ -89,10 +65,7 @@ export class Batcher { }); } - /** - * Drain all pending messages before closing. Returns a promise that - * resolves when all pending messages have been flushed. - */ + /** Drain all pending messages before closing. */ async drain(): Promise { this.closed = true; @@ -101,7 +74,7 @@ export class Batcher { this.lingerTimer = null; } - if (this.pending.length === 0) { + if (this.pending.length === 0 && this.inFlightCount === 0) { return; } @@ -119,12 +92,6 @@ export class Batcher { } } - /** - * Auto mode: schedule a flush via setImmediate. Messages that arrive - * within the same event loop turn will cluster into the same batch. - * At low load, each message is sent individually. At high load, - * messages naturally batch together. - */ private scheduleAutoFlush(): void { if (this.flushScheduled) return; this.flushScheduled = true; @@ -135,14 +102,9 @@ export class Batcher { }); } - /** - * Linger mode: start a timer on the first message. Flush when the batch - * fills or the timer fires, whichever comes first. - */ private scheduleLingerFlush(): void { if (this.batchMode.mode !== "linger") return; - // If batch is full, flush immediately. if (this.pending.length >= this.batchMode.batchSize) { if (this.lingerTimer !== null) { clearTimeout(this.lingerTimer); @@ -152,7 +114,6 @@ export class Batcher { return; } - // Start timer if not already running. if (this.lingerTimer === null) { this.lingerTimer = setTimeout(() => { this.lingerTimer = null; @@ -161,9 +122,6 @@ export class Batcher { } } - /** - * Flush all pending items, splitting into maxBatchSize chunks. 
- */ private flushAll(): void { while (this.pending.length > 0) { const items = this.pending.splice(0, this.maxBatchSize); @@ -173,7 +131,6 @@ export class Batcher { this.notifyDrainComplete(); }); } - // Also check drain in case pending was already empty and nothing in-flight. this.notifyDrainComplete(); } @@ -186,59 +143,59 @@ export class Batcher { } } - /** - * Flush a batch of items via the unified Enqueue RPC (repeated messages). - * All items -- single or multiple -- use the same RPC. - */ - private flushBatch(items: BatchItem[]): Promise { - if (items.length === 0) return Promise.resolve(); + private async flushBatch(items: BatchItem[]): Promise { + if (items.length === 0) return; - const messages = items.map((item) => ({ - queue: item.message.queue, - headers: item.message.headers, - payload: item.message.payload, - })); + const enc = new Encoder(256); + enc.writeU32(items.length); + for (const item of items) { + enc.writeString(item.message.queue); + enc.writeMap(item.message.headers); + enc.writeBytes(item.message.payload); + } - return new Promise((resolve) => { - this.grpcClient.enqueue( - { messages }, - this.callMetadata(), - (err: grpc.ServiceError | null, resp?: EnqueueResponse__Output) => { - if (err) { - // Transport-level failure: all items get the error. 
- const mapped = mapTransportError(err); - for (const item of items) { - item.reject(mapped); - } - } else { - const results = resp!.results; - for (let i = 0; i < items.length; i++) { - const result = results[i]; - if (!result) { - items[i].reject( - new RPCError( - grpc.status.INTERNAL, - "server returned fewer results than messages sent" - ) - ); - continue; - } - if (result.result === "messageId" && result.messageId) { - items[i].resolve(result.messageId); - } else if (result.result === "error" && result.error) { - items[i].reject( - mapResultError(result.error.code, result.error.message) - ); - } else { - items[i].reject( - new RPCError(grpc.status.INTERNAL, "no result from server") - ); - } - } - } - resolve(); + try { + const resp = await this.conn.sendRequest(OP_ENQUEUE, enc.finish()); + + if (resp.opcode === OP_ERROR) { + const dec = new Decoder(resp.payload); + const errorCode = dec.readU8(); + const message = dec.readString(); + const metadata = dec.readMap(); + const err = mapErrorCode(errorCode, message, metadata); + for (const item of items) { + item.reject(err); } - ); - }); + return; + } + + if (resp.opcode !== OP_ENQUEUE_RESULT) { + const err = new ProtocolError(0xff, `unexpected response opcode: 0x${resp.opcode.toString(16)}`); + for (const item of items) { + item.reject(err); + } + return; + } + + const dec = new Decoder(resp.payload); + const count = dec.readU32(); + for (let i = 0; i < items.length; i++) { + if (i >= count) { + items[i].reject(new ProtocolError(0xff, "server returned fewer results than messages sent")); + continue; + } + const errorCode = dec.readU8(); + const messageId = dec.readString(); + if (errorCode === ERR_OK) { + items[i].resolve(messageId); + } else { + items[i].reject(mapItemErrorCode(errorCode, "enqueue")); + } + } + } catch (err) { + for (const item of items) { + item.reject(err instanceof Error ? 
err : new Error(String(err))); + } + } } } diff --git a/src/client.ts b/src/client.ts index 2cc4a75..5d53d34 100644 --- a/src/client.ts +++ b/src/client.ts @@ -1,172 +1,82 @@ -import * as grpc from "@grpc/grpc-js"; -import * as protoLoader from "@grpc/proto-loader"; -import * as fs from "fs"; -import * as path from "path"; - +import { + Encoder, + Decoder, + OP_ENQUEUE, + OP_ENQUEUE_RESULT, + OP_CONSUME, + OP_CONSUME_OK, + OP_CANCEL_CONSUME, + OP_ACK, + OP_ACK_RESULT, + OP_NACK, + OP_NACK_RESULT, + OP_ERROR, + OP_CREATE_QUEUE, + OP_CREATE_QUEUE_RESULT, + OP_DELETE_QUEUE, + OP_DELETE_QUEUE_RESULT, + OP_GET_STATS, + OP_GET_STATS_RESULT, + OP_LIST_QUEUES, + OP_LIST_QUEUES_RESULT, + OP_SET_CONFIG, + OP_SET_CONFIG_RESULT, + OP_GET_CONFIG, + OP_GET_CONFIG_RESULT, + OP_LIST_CONFIG, + OP_LIST_CONFIG_RESULT, + OP_REDRIVE, + OP_REDRIVE_RESULT, + OP_CREATE_API_KEY, + OP_CREATE_API_KEY_RESULT, + OP_REVOKE_API_KEY, + OP_REVOKE_API_KEY_RESULT, + OP_LIST_API_KEYS, + OP_LIST_API_KEYS_RESULT, + OP_SET_ACL, + OP_SET_ACL_RESULT, + OP_GET_ACL, + OP_GET_ACL_RESULT, + ERR_OK, +} from "./fibp"; +import type { Frame } from "./fibp"; +import { Connection } from "./connection"; +import type { ConnectionOptions } from "./connection"; +import { Batcher, type BatchMode } from "./batcher"; import { FilaError, - MessageNotFoundError, - QueueNotFoundError, - RPCError, + mapErrorCode, + mapItemErrorCode, + NotLeaderError, + ProtocolError, } from "./errors"; -import type { ConsumeMessage, EnqueueMessage, EnqueueResult } from "./types"; -import type { FilaServiceClient } from "../generated/fila/v1/FilaService"; -import type { EnqueueResponse__Output } from "../generated/fila/v1/EnqueueResponse"; -import type { AckResponse__Output } from "../generated/fila/v1/AckResponse"; -import type { NackResponse__Output } from "../generated/fila/v1/NackResponse"; -import type { ConsumeResponse__Output } from "../generated/fila/v1/ConsumeResponse"; -import { Batcher, type BatchMode } from "./batcher"; - -function 
resolveProtoDir(): string { - // Source (dev/test): __dirname = /src/ - const devPath = path.join(__dirname, "..", "proto"); - if (fs.existsSync(devPath)) return devPath; - // Built (dist): __dirname = /dist/src/ - return path.join(__dirname, "..", "..", "proto"); -} - -const PROTO_DIR = resolveProtoDir(); - -function loadServiceProto(): grpc.GrpcObject { - const packageDefinition = protoLoader.loadSync( - [ - path.join(PROTO_DIR, "fila", "v1", "service.proto"), - path.join(PROTO_DIR, "fila", "v1", "messages.proto"), - ], - { - keepCase: false, - longs: String, - enums: String, - defaults: true, - oneofs: true, - includeDirs: [PROTO_DIR], - } - ); - return grpc.loadPackageDefinition(packageDefinition); -} - -/** Metadata key the server uses to indicate the current queue leader address. */ -const LEADER_ADDR_KEY = "x-fila-leader-addr"; - -/** - * Extract the leader address from a gRPC UNAVAILABLE error's trailing metadata. - * Returns the address string, or undefined if not present. - */ -function extractLeaderAddr(err: grpc.ServiceError): string | undefined { - if (err.code !== grpc.status.UNAVAILABLE) return undefined; - const values = err.metadata?.get(LEADER_ADDR_KEY); - if (values && values.length > 0) { - return String(values[0]); - } - return undefined; -} - -/** Create a FilaServiceClient for the given address and credentials. 
*/ -function createGrpcClient( - addr: string, - creds: grpc.ChannelCredentials -): FilaServiceClient { - const proto = loadServiceProto(); - // eslint-disable-next-line @typescript-eslint/no-explicit-any - const FilaService = (proto.fila as any).v1 - .FilaService as grpc.ServiceClientConstructor; - return new FilaService(addr, creds) as unknown as FilaServiceClient; -} - -function mapEnqueueError(err: grpc.ServiceError): FilaError { - if (err.code === grpc.status.NOT_FOUND) { - return new QueueNotFoundError(`enqueue: ${err.details}`); - } - return new RPCError(err.code, err.details); -} - -function mapConsumeError(err: grpc.ServiceError): FilaError { - if (err.code === grpc.status.NOT_FOUND) { - return new QueueNotFoundError(`consume: ${err.details}`); - } - return new RPCError(err.code, err.details); -} - -/** - * Map a per-message EnqueueResult error code to an SDK error type. - */ -function mapEnqueueResultError(code: string, message: string): FilaError { - if (code === "ENQUEUE_ERROR_CODE_QUEUE_NOT_FOUND") { - return new QueueNotFoundError(`enqueue: ${message}`); - } - return new RPCError(grpc.status.INTERNAL, message); -} - -/** - * Map a per-message AckResult error code to an SDK error type. - */ -function mapAckResultError(code: string, message: string): FilaError { - if (code === "ACK_ERROR_CODE_MESSAGE_NOT_FOUND") { - return new MessageNotFoundError(`ack: ${message}`); - } - return new RPCError(grpc.status.INTERNAL, message); -} - -/** - * Map a per-message NackResult error code to an SDK error type. - */ -function mapNackResultError(code: string, message: string): FilaError { - if (code === "NACK_ERROR_CODE_MESSAGE_NOT_FOUND") { - return new MessageNotFoundError(`nack: ${message}`); - } - return new RPCError(grpc.status.INTERNAL, message); -} - -/** Map a ConsumeResponse to ConsumeMessage(s), skipping keepalive frames. 
*/ -function mapConsumeResponse( - resp: ConsumeResponse__Output -): ConsumeMessage[] { - if (!resp.messages || resp.messages.length === 0) { - return []; // keepalive frame - } - - const results: ConsumeMessage[] = []; - for (const msg of resp.messages) { - if (!msg || !msg.id) continue; - const metadata = msg.metadata; - results.push({ - id: msg.id, - headers: msg.headers ?? {}, - payload: Buffer.isBuffer(msg.payload) - ? msg.payload - : Buffer.from(msg.payload ?? ""), - fairnessKey: metadata?.fairnessKey ?? "", - attemptCount: metadata?.attemptCount ?? 0, - queue: metadata?.queueId ?? "", - }); - } - return results; -} +import type { + ConsumeMessage, + EnqueueMessage, + EnqueueResult, + QueueStats, + QueueInfo, + ApiKeyInfo, + AclPermission, +} from "./types"; /** Connection options for TLS, authentication, and batching. */ export interface ClientOptions { - /** - * Enable TLS using the OS system trust store for server verification. - * When `true` and `caCert` is not provided, the system root certificates - * are used automatically. When `caCert` is provided, this is implied. - */ + /** Enable TLS using the OS system trust store. */ tls?: boolean; /** CA certificate PEM for server verification. When set, enables TLS. */ caCert?: Buffer; - /** Client certificate PEM for mutual TLS (mTLS). Requires TLS to be enabled (via `tls: true` or `caCert`). */ + /** Client certificate PEM for mutual TLS (mTLS). */ clientCert?: Buffer; - /** Client private key PEM for mutual TLS (mTLS). Requires TLS to be enabled (via `tls: true` or `caCert`). */ + /** Client private key PEM for mutual TLS (mTLS). */ clientKey?: Buffer; - /** API key for authentication. Sent as `authorization: Bearer ` metadata on every RPC. */ + /** API key for authentication. Sent in the FIBP handshake. */ apiKey?: string; /** * Batch mode for enqueue() calls. - * - * - `'auto'` (DEFAULT): Opportunistic batching via setImmediate. Zero config, - * zero latency penalty at low load. 
Messages cluster naturally at high load. - * - `'linger'`: Timer-based batching with explicit `lingerMs` and `batchSize`. - * - `'disabled'`: No batching. Each enqueue() is a direct RPC. - * + * - 'auto' (DEFAULT): Opportunistic batching via setImmediate. + * - 'linger': Timer-based batching with explicit lingerMs and batchSize. + * - 'disabled': No batching. Each enqueue() is a direct request. * @default 'auto' */ batchMode?: "auto" | "linger" | "disabled"; @@ -178,16 +88,36 @@ export interface ClientOptions { batchSize?: number; } +/** Parse "host:port" into components. Default port 5555. */ +function parseAddr(addr: string): { host: string; port: number } { + const lastColon = addr.lastIndexOf(":"); + if (lastColon === -1) return { host: addr, port: 5555 }; + const host = addr.substring(0, lastColon); + const port = parseInt(addr.substring(lastColon + 1), 10); + return { host, port: isNaN(port) ? 5555 : port }; +} + +/** Decode an Error frame payload into an SDK error. */ +function decodeErrorFrame(frame: Frame): FilaError { + const dec = new Decoder(frame.payload); + const code = dec.readU8(); + const message = dec.readString(); + const metadata = dec.readMap(); + return mapErrorCode(code, message, metadata); +} + +/** Check if a response frame is an Error frame and throw if so. */ +function assertNotError(frame: Frame): void { + if (frame.opcode === OP_ERROR) throw decodeErrorFrame(frame); +} + /** - * Client for the Fila message broker. - * - * Wraps the hot-path gRPC operations: enqueue, consume, ack, nack. - * By default, enqueue() calls are automatically batched for optimal throughput - * with zero added latency at low load. + * Client for the Fila message broker using the FIBP binary protocol. 
* * @example * ```typescript * const client = new Client("localhost:5555"); + * await client.connect(); * const msgId = await client.enqueue("my-queue", { tenant: "acme" }, Buffer.from("hello")); * for await (const msg of client.consume("my-queue")) { * await client.ack("my-queue", msg.id); @@ -196,13 +126,15 @@ export interface ClientOptions { * ``` */ export class Client { - private readonly grpcClient: FilaServiceClient; - private readonly creds: grpc.ChannelCredentials; - private readonly apiKey?: string; + private conn: Connection; + private readonly addr: string; + private readonly connOpts: ConnectionOptions; private readonly batcher: Batcher | null; + private readonly batchModeConfig: "auto" | "linger" | "disabled"; + private readonly clientOptions: ClientOptions; /** - * Connect to a Fila broker at the given address. + * Create a client for the given address. Call `connect()` to establish the connection. * @param addr - Broker address in "host:port" format (e.g., "localhost:5555"). * @param options - Optional TLS, authentication, and batching settings. */ @@ -218,32 +150,24 @@ export class Client { throw new Error("clientCert and clientKey must be provided together"); } - if (options?.caCert) { - this.creds = grpc.credentials.createSsl( - options.caCert, - options.clientKey ?? null, - options.clientCert ?? null - ); - } else if (tlsEnabled) { - this.creds = grpc.credentials.createSsl( - null, - options?.clientKey ?? null, - options?.clientCert ?? null - ); - } else { - this.creds = grpc.credentials.createInsecure(); - } - - this.grpcClient = createGrpcClient(addr, this.creds); - this.apiKey = options?.apiKey; - - // Initialize the batcher based on the configured mode. - const modeStr = options?.batchMode ?? "auto"; - if (modeStr === "disabled") { + this.addr = addr; + this.clientOptions = options ?? 
{}; + const { host, port } = parseAddr(addr); + this.connOpts = { + tls: options?.tls, + caCert: options?.caCert, + clientCert: options?.clientCert, + clientKey: options?.clientKey, + apiKey: options?.apiKey, + }; + this.conn = new Connection(host, port, this.connOpts); + + this.batchModeConfig = options?.batchMode ?? "auto"; + if (this.batchModeConfig === "disabled") { this.batcher = null; } else { let batchMode: BatchMode; - if (modeStr === "linger") { + if (this.batchModeConfig === "linger") { if (options?.lingerMs === undefined || options?.batchSize === undefined) { throw new Error("lingerMs and batchSize are required when batchMode is 'linger'"); } @@ -258,297 +182,675 @@ export class Client { maxBatchSize: options?.maxBatchSize, }; } - this.batcher = new Batcher( - this.grpcClient, - () => this.callMetadata(), - batchMode - ); + this.batcher = new Batcher(this.conn, batchMode); } } - /** Build gRPC metadata, attaching the API key if configured. */ - private callMetadata(): grpc.Metadata { - const md = new grpc.Metadata(); - if (this.apiKey) { - md.set("authorization", `Bearer ${this.apiKey}`); - } - return md; + /** Establish the TCP (+ optional TLS) connection and perform the FIBP handshake. */ + async connect(): Promise { + await this.conn.connect(); } - /** - * Close the client, draining any pending batched messages first. - * Returns a promise that resolves when all pending messages have been - * flushed and the gRPC channel is closed. - */ + /** Close the client, draining any pending batched messages first. */ async close(): Promise { if (this.batcher) { await this.batcher.drain(); } - (this.grpcClient as unknown as grpc.Client).close(); + await this.conn.close(); } + // --------------------------------------------------------------------------- + // Hot-path operations + // --------------------------------------------------------------------------- + /** * Enqueue a message to the specified queue. 
- * - * When batching is enabled (default), the message is routed through the - * batcher. At low load, messages are sent individually. At high load, - * messages cluster naturally into larger Enqueue RPCs. - * - * @param queue - Target queue name. - * @param headers - Optional message headers. - * @param payload - Message payload bytes. - * @returns Broker-assigned message ID (UUIDv7). - * @throws {QueueNotFoundError} If the queue does not exist. - * @throws {RPCError} For unexpected gRPC failures. + * When batching is enabled (default), the message is routed through the batcher. */ enqueue( queue: string, headers: Record | null, payload: Buffer ): Promise { - // Route through the batcher when enabled. if (this.batcher) { - return this.batcher.submit({ - queue, - headers: headers ?? {}, - payload, - }); + return this.batcher.submit({ queue, headers: headers ?? {}, payload }); } - // No batching: direct RPC with single message in the repeated field. - return new Promise((resolve, reject) => { - this.grpcClient.enqueue( - { messages: [{ queue, headers: headers ?? {}, payload }] }, - this.callMetadata(), - (err: grpc.ServiceError | null, resp?: EnqueueResponse__Output) => { - if (err) { - reject(mapEnqueueError(err)); - return; - } - const result = resp!.results[0]; - if (!result) { - reject(new RPCError(grpc.status.INTERNAL, "no result from server")); - return; - } - if (result.result === "messageId" && result.messageId) { - resolve(result.messageId); - } else if (result.result === "error" && result.error) { - reject(mapEnqueueResultError(result.error.code, result.error.message)); - } else { - reject(new RPCError(grpc.status.INTERNAL, "no result from server")); - } - } - ); - }); + return this.enqueueDirect(queue, headers ?? 
{}, payload); + } + + private async enqueueDirect( + queue: string, + headers: Record, + payload: Buffer + ): Promise { + const enc = new Encoder(128); + enc.writeU32(1); // message_count + enc.writeString(queue); + enc.writeMap(headers); + enc.writeBytes(payload); + + const resp = await this.conn.sendRequest(OP_ENQUEUE, enc.finish()); + assertNotError(resp); + + if (resp.opcode !== OP_ENQUEUE_RESULT) { + throw new ProtocolError(0xff, `unexpected opcode: 0x${resp.opcode.toString(16)}`); + } + + const dec = new Decoder(resp.payload); + const count = dec.readU32(); + if (count < 1) throw new ProtocolError(0xff, "no result from server"); + const errorCode = dec.readU8(); + const messageId = dec.readString(); + if (errorCode !== ERR_OK) { + throw mapItemErrorCode(errorCode, "enqueue"); + } + return messageId; } /** - * Enqueue multiple messages in a single RPC call. - * - * Each message is independently validated and processed. A failed message - * does not affect the others. Returns one result per input message, - * in the same order. - * - * This always bypasses the batcher and issues a direct Enqueue RPC. - * - * @param messages - Array of messages to enqueue. - * @returns Per-message results (success with messageId, or error with description). - * @throws {RPCError} For transport-level failures affecting the entire call. + * Enqueue multiple messages in a single call. + * Always bypasses the batcher. 
*/ - enqueueMany(messages: EnqueueMessage[]): Promise { - const protoMessages = messages.map((m) => ({ - queue: m.queue, - headers: m.headers, - payload: m.payload, - })); - - return new Promise((resolve, reject) => { - this.grpcClient.enqueue( - { messages: protoMessages }, - this.callMetadata(), - (err: grpc.ServiceError | null, resp?: EnqueueResponse__Output) => { - if (err) { - reject(new RPCError(err.code, err.details)); - return; - } + async enqueueMany(messages: EnqueueMessage[]): Promise { + const enc = new Encoder(256); + enc.writeU32(messages.length); + for (const m of messages) { + enc.writeString(m.queue); + enc.writeMap(m.headers); + enc.writeBytes(m.payload); + } - const results: EnqueueResult[] = resp!.results.map((r) => { - if (r.result === "messageId" && r.messageId) { - return { - success: true as const, - messageId: r.messageId, - }; - } else if (r.result === "error" && r.error) { - return { - success: false as const, - error: r.error.message, - }; - } else { - return { - success: false as const, - error: "no result from server", - }; - } - }); + const resp = await this.conn.sendRequest(OP_ENQUEUE, enc.finish()); - resolve(results); - } - ); - }); + if (resp.opcode === OP_ERROR) { + throw decodeErrorFrame(resp); + } + + if (resp.opcode !== OP_ENQUEUE_RESULT) { + throw new ProtocolError(0xff, `unexpected opcode: 0x${resp.opcode.toString(16)}`); + } + + const dec = new Decoder(resp.payload); + const count = dec.readU32(); + const results: EnqueueResult[] = []; + for (let i = 0; i < messages.length; i++) { + if (i >= count) { + results.push({ success: false, error: "server returned fewer results than messages sent" }); + continue; + } + const errorCode = dec.readU8(); + const messageId = dec.readString(); + if (errorCode === ERR_OK) { + results.push({ success: true, messageId }); + } else { + const name = errorCode.toString(16); + results.push({ success: false, error: `error code 0x${name}: ${messageId || "unknown"}` }); + } + } + return results; } 
/** * Open a streaming consumer on the specified queue. - * * Returns an async iterable that yields messages as they become available. - * Empty response frames (keepalive signals) are skipped automatically. - * Delivery frames containing multiple messages are transparently unpacked - * into individual messages. - * - * If the server returns UNAVAILABLE with an `x-fila-leader-addr` metadata - * header, the client transparently reconnects to the leader node and retries - * the consume stream once (max 1 redirect per call). - * - * @param queue - Queue to consume from. - * @throws {QueueNotFoundError} If the queue does not exist. - * @throws {RPCError} For unexpected gRPC failures. */ async *consume(queue: string): AsyncIterable { yield* this.consumeInner(queue, false); } - /** - * Inner consume implementation that optionally follows a leader hint. - * @param redirected - true if this is already a redirected attempt (prevents loops). - */ private async *consumeInner( queue: string, redirected: boolean ): AsyncIterable { - const stream = this.grpcClient.consume({ queue }, this.callMetadata()); - const iterable = stream as AsyncIterable; + const enc = new Encoder(64); + enc.writeString(queue); + + const requestId = this.conn.allocRequestId(); + + // Set up a queue for delivery frames. + const deliveryQueue: Frame[] = []; + let deliveryResolve: (() => void) | null = null; + let streamClosed = false; + + this.conn.registerConsumeHandler(requestId, (frame) => { + if (frame.opcode === 0 && frame.payload.length === 0) { + // Connection closed signal. + streamClosed = true; + if (deliveryResolve) { + deliveryResolve(); + deliveryResolve = null; + } + return; + } + deliveryQueue.push(frame); + if (deliveryResolve) { + deliveryResolve(); + deliveryResolve = null; + } + }); try { - for await (const resp of iterable) { - const messages = mapConsumeResponse(resp); - for (const msg of messages) { - yield msg; + // Send Consume request and wait for ConsumeOk. 
+ const consumeResp = await this.conn.sendRequestWithId( + OP_CONSUME, requestId, enc.finish() + ); + + if (consumeResp.opcode === OP_ERROR) { + const err = decodeErrorFrame(consumeResp); + if (!redirected && err instanceof NotLeaderError && err.leaderAddr) { + // Follow leader hint. + this.conn.unregisterConsumeHandler(requestId); + yield* this.consumeFromLeader(queue, err.leaderAddr); + return; } + throw err; + } + + if (consumeResp.opcode !== OP_CONSUME_OK) { + throw new ProtocolError(0xff, `unexpected consume response: 0x${consumeResp.opcode.toString(16)}`); } - } catch (err) { - const svcErr = err as grpc.ServiceError; - - // If we haven't redirected yet and the server tells us who the leader is, - // open a new connection to the leader and retry the consume stream. - if (!redirected) { - const leaderAddr = extractLeaderAddr(svcErr); - if (leaderAddr) { - stream.cancel(); - const leaderClient = createGrpcClient(leaderAddr, this.creds); - const leaderStream = leaderClient.consume( - { queue }, - this.callMetadata() - ); - const leaderIterable = - leaderStream as AsyncIterable; - try { - for await (const resp of leaderIterable) { - const messages = mapConsumeResponse(resp); - for (const msg of messages) { - yield msg; - } - } - } catch (retryErr) { - const retrySvcErr = retryErr as grpc.ServiceError; - if ( - retrySvcErr.code !== undefined && - retrySvcErr.code !== grpc.status.CANCELLED - ) { - throw mapConsumeError(retrySvcErr); - } - } finally { - leaderStream.cancel(); - (leaderClient as unknown as grpc.Client).close(); + + // Now yield deliveries until stream closes. + while (!streamClosed && !this.conn.isClosed) { + if (deliveryQueue.length === 0) { + await new Promise((resolve) => { + deliveryResolve = resolve; + }); + } + + while (deliveryQueue.length > 0) { + const frame = deliveryQueue.shift()!; + const messages = decodeDelivery(frame); + for (const msg of messages) { + yield msg; } - return; } } + } finally { + // Send CancelConsume. 
+ this.conn.sendFireAndForget(OP_CANCEL_CONSUME, requestId, Buffer.alloc(0)); + this.conn.unregisterConsumeHandler(requestId); + } + } - if (svcErr.code !== undefined && svcErr.code !== grpc.status.CANCELLED) { - throw mapConsumeError(svcErr); + private async *consumeFromLeader( + queue: string, + leaderAddr: string + ): AsyncIterable { + const { host, port } = parseAddr(leaderAddr); + const leaderConn = new Connection(host, port, this.connOpts); + try { + await leaderConn.connect(); + const enc = new Encoder(64); + enc.writeString(queue); + const requestId = leaderConn.allocRequestId(); + + const deliveryQueue: Frame[] = []; + let deliveryResolve: (() => void) | null = null; + let streamClosed = false; + + leaderConn.registerConsumeHandler(requestId, (frame) => { + if (frame.opcode === 0 && frame.payload.length === 0) { + streamClosed = true; + if (deliveryResolve) { + deliveryResolve(); + deliveryResolve = null; + } + return; + } + deliveryQueue.push(frame); + if (deliveryResolve) { + deliveryResolve(); + deliveryResolve = null; + } + }); + + const consumeResp = await leaderConn.sendRequestWithId( + OP_CONSUME, requestId, enc.finish() + ); + assertNotError(consumeResp); + + while (!streamClosed && !leaderConn.isClosed) { + if (deliveryQueue.length === 0) { + await new Promise((resolve) => { + deliveryResolve = resolve; + }); + } + while (deliveryQueue.length > 0) { + const frame = deliveryQueue.shift()!; + const messages = decodeDelivery(frame); + for (const msg of messages) { + yield msg; + } + } } - // Stream cancelled or closed normally — just return. + + leaderConn.sendFireAndForget(OP_CANCEL_CONSUME, requestId, Buffer.alloc(0)); + leaderConn.unregisterConsumeHandler(requestId); } finally { - stream.cancel(); + await leaderConn.close(); } } /** * Acknowledge a successfully processed message. - * @param queue - Queue the message belongs to. - * @param msgId - ID of the message to acknowledge. - * @throws {MessageNotFoundError} If the message does not exist. 
- * @throws {RPCError} For unexpected gRPC failures. */ - ack(queue: string, msgId: string): Promise { - return new Promise((resolve, reject) => { - this.grpcClient.ack( - { messages: [{ queue, messageId: msgId }] }, - this.callMetadata(), - (err: grpc.ServiceError | null, resp?: AckResponse__Output) => { - if (err) { - reject(new RPCError(err.code, err.details)); - return; - } - const result = resp!.results[0]; - if (!result) { - reject(new RPCError(grpc.status.INTERNAL, "no result from server")); - return; - } - if (result.result === "success") { - resolve(); - } else if (result.result === "error" && result.error) { - reject(mapAckResultError(result.error.code, result.error.message)); - } else { - reject(new RPCError(grpc.status.INTERNAL, "no result from server")); - } - } - ); - }); + async ack(queue: string, msgId: string): Promise { + const enc = new Encoder(64); + enc.writeU32(1); // item_count + enc.writeString(queue); + enc.writeString(msgId); + + const resp = await this.conn.sendRequest(OP_ACK, enc.finish()); + assertNotError(resp); + + if (resp.opcode !== OP_ACK_RESULT) { + throw new ProtocolError(0xff, `unexpected opcode: 0x${resp.opcode.toString(16)}`); + } + + const dec = new Decoder(resp.payload); + const count = dec.readU32(); + if (count < 1) throw new ProtocolError(0xff, "no result from server"); + const errorCode = dec.readU8(); + if (errorCode !== ERR_OK) { + throw mapItemErrorCode(errorCode, "ack"); + } } /** * Negatively acknowledge a message that failed processing. - * @param queue - Queue the message belongs to. - * @param msgId - ID of the message to nack. - * @param error - Description of the failure. - * @throws {MessageNotFoundError} If the message does not exist. - * @throws {RPCError} For unexpected gRPC failures. 
*/ - nack(queue: string, msgId: string, error: string): Promise { - return new Promise((resolve, reject) => { - this.grpcClient.nack( - { messages: [{ queue, messageId: msgId, error }] }, - this.callMetadata(), - (err: grpc.ServiceError | null, resp?: NackResponse__Output) => { - if (err) { - reject(new RPCError(err.code, err.details)); - return; - } - const result = resp!.results[0]; - if (!result) { - reject(new RPCError(grpc.status.INTERNAL, "no result from server")); - return; - } - if (result.result === "success") { - resolve(); - } else if (result.result === "error" && result.error) { - reject(mapNackResultError(result.error.code, result.error.message)); - } else { - reject(new RPCError(grpc.status.INTERNAL, "no result from server")); - } - } - ); + async nack(queue: string, msgId: string, error: string): Promise { + const enc = new Encoder(64); + enc.writeU32(1); // item_count + enc.writeString(queue); + enc.writeString(msgId); + enc.writeString(error); + + const resp = await this.conn.sendRequest(OP_NACK, enc.finish()); + assertNotError(resp); + + if (resp.opcode !== OP_NACK_RESULT) { + throw new ProtocolError(0xff, `unexpected opcode: 0x${resp.opcode.toString(16)}`); + } + + const dec = new Decoder(resp.payload); + const count = dec.readU32(); + if (count < 1) throw new ProtocolError(0xff, "no result from server"); + const errorCode = dec.readU8(); + if (errorCode !== ERR_OK) { + throw mapItemErrorCode(errorCode, "nack"); + } + } + + // --------------------------------------------------------------------------- + // Admin operations + // --------------------------------------------------------------------------- + + /** Create a queue with optional scripts and visibility timeout. */ + async createQueue( + name: string, + opts?: { + onEnqueueScript?: string; + onFailureScript?: string; + visibilityTimeoutMs?: number; + } + ): Promise { + const enc = new Encoder(128); + enc.writeString(name); + enc.writeOptionalString(opts?.onEnqueueScript ?? 
null); + enc.writeOptionalString(opts?.onFailureScript ?? null); + enc.writeU64(BigInt(opts?.visibilityTimeoutMs ?? 0)); + + const resp = await this.conn.sendRequest(OP_CREATE_QUEUE, enc.finish()); + assertNotError(resp); + if (resp.opcode !== OP_CREATE_QUEUE_RESULT) { + throw new ProtocolError(0xff, `unexpected opcode: 0x${resp.opcode.toString(16)}`); + } + + const dec = new Decoder(resp.payload); + const errorCode = dec.readU8(); + const queueId = dec.readString(); + if (errorCode !== ERR_OK) throw mapItemErrorCode(errorCode, "createQueue"); + return queueId; + } + + /** Delete a queue. */ + async deleteQueue(queue: string): Promise { + const enc = new Encoder(64); + enc.writeString(queue); + + const resp = await this.conn.sendRequest(OP_DELETE_QUEUE, enc.finish()); + assertNotError(resp); + if (resp.opcode !== OP_DELETE_QUEUE_RESULT) { + throw new ProtocolError(0xff, `unexpected opcode: 0x${resp.opcode.toString(16)}`); + } + + const dec = new Decoder(resp.payload); + const errorCode = dec.readU8(); + if (errorCode !== ERR_OK) throw mapItemErrorCode(errorCode, "deleteQueue"); + } + + /** Get queue statistics. 
*/ + async getStats(queue: string): Promise { + const enc = new Encoder(64); + enc.writeString(queue); + + const resp = await this.conn.sendRequest(OP_GET_STATS, enc.finish()); + assertNotError(resp); + if (resp.opcode !== OP_GET_STATS_RESULT) { + throw new ProtocolError(0xff, `unexpected opcode: 0x${resp.opcode.toString(16)}`); + } + + const dec = new Decoder(resp.payload); + const errorCode = dec.readU8(); + if (errorCode !== ERR_OK) throw mapItemErrorCode(errorCode, "getStats"); + + const depth = dec.readU64(); + const inFlight = dec.readU64(); + const activeFairnessKeys = dec.readU64(); + const activeConsumers = dec.readU32(); + const quantum = dec.readU32(); + const leaderNodeId = dec.readU64(); + const replicationCount = dec.readU32(); + + const perKeyCount = dec.readU16(); + const perKeyStats = []; + for (let i = 0; i < perKeyCount; i++) { + perKeyStats.push({ + key: dec.readString(), + pendingCount: dec.readU64(), + currentDeficit: dec.readI64(), + weight: dec.readU32(), + }); + } + + const perThrottleCount = dec.readU16(); + const perThrottleStats = []; + for (let i = 0; i < perThrottleCount; i++) { + perThrottleStats.push({ + key: dec.readString(), + tokens: dec.readF64(), + ratePerSecond: dec.readF64(), + burst: dec.readF64(), + }); + } + + return { + depth, + inFlight, + activeFairnessKeys, + activeConsumers, + quantum, + leaderNodeId, + replicationCount, + perKeyStats, + perThrottleStats, + }; + } + + /** List all queues. 
*/ + async listQueues(): Promise<{ clusterNodeCount: number; queues: QueueInfo[] }> { + const resp = await this.conn.sendRequest(OP_LIST_QUEUES, Buffer.alloc(0)); + assertNotError(resp); + if (resp.opcode !== OP_LIST_QUEUES_RESULT) { + throw new ProtocolError(0xff, `unexpected opcode: 0x${resp.opcode.toString(16)}`); + } + + const dec = new Decoder(resp.payload); + const errorCode = dec.readU8(); + if (errorCode !== ERR_OK) throw mapItemErrorCode(errorCode, "listQueues"); + + const clusterNodeCount = dec.readU32(); + const queueCount = dec.readU16(); + const queues: QueueInfo[] = []; + for (let i = 0; i < queueCount; i++) { + queues.push({ + name: dec.readString(), + depth: dec.readU64(), + inFlight: dec.readU64(), + activeConsumers: dec.readU32(), + leaderNodeId: dec.readU64(), + }); + } + return { clusterNodeCount, queues }; + } + + /** Set a runtime config key. */ + async setConfig(key: string, value: string): Promise { + const enc = new Encoder(64); + enc.writeString(key); + enc.writeString(value); + + const resp = await this.conn.sendRequest(OP_SET_CONFIG, enc.finish()); + assertNotError(resp); + if (resp.opcode !== OP_SET_CONFIG_RESULT) { + throw new ProtocolError(0xff, `unexpected opcode: 0x${resp.opcode.toString(16)}`); + } + + const dec = new Decoder(resp.payload); + const errorCode = dec.readU8(); + if (errorCode !== ERR_OK) throw mapItemErrorCode(errorCode, "setConfig"); + } + + /** Get a runtime config value. */ + async getConfig(key: string): Promise { + const enc = new Encoder(64); + enc.writeString(key); + + const resp = await this.conn.sendRequest(OP_GET_CONFIG, enc.finish()); + assertNotError(resp); + if (resp.opcode !== OP_GET_CONFIG_RESULT) { + throw new ProtocolError(0xff, `unexpected opcode: 0x${resp.opcode.toString(16)}`); + } + + const dec = new Decoder(resp.payload); + const errorCode = dec.readU8(); + if (errorCode !== ERR_OK) throw mapItemErrorCode(errorCode, "getConfig"); + return dec.readString(); + } + + /** List config keys by prefix. 
*/
+  async listConfig(prefix: string): Promise<Array<{ key: string; value: string }>> {
+    const enc = new Encoder(64);
+    enc.writeString(prefix);
+
+    const resp = await this.conn.sendRequest(OP_LIST_CONFIG, enc.finish());
+    assertNotError(resp);
+    if (resp.opcode !== OP_LIST_CONFIG_RESULT) {
+      throw new ProtocolError(0xff, `unexpected opcode: 0x${resp.opcode.toString(16)}`);
+    }
+
+    const dec = new Decoder(resp.payload);
+    const errorCode = dec.readU8();
+    if (errorCode !== ERR_OK) throw mapItemErrorCode(errorCode, "listConfig");
+
+    const count = dec.readU16();
+    const entries = [];
+    for (let i = 0; i < count; i++) {
+      entries.push({ key: dec.readString(), value: dec.readString() });
+    }
+    return entries;
+  }
+
+  /** Redrive messages from a DLQ back to the parent queue. */
+  async redrive(dlqQueue: string, count: bigint): Promise<bigint> {
+    const enc = new Encoder(64);
+    enc.writeString(dlqQueue);
+    enc.writeU64(count);
+
+    const resp = await this.conn.sendRequest(OP_REDRIVE, enc.finish());
+    assertNotError(resp);
+    if (resp.opcode !== OP_REDRIVE_RESULT) {
+      throw new ProtocolError(0xff, `unexpected opcode: 0x${resp.opcode.toString(16)}`);
+    }
+
+    const dec = new Decoder(resp.payload);
+    const errorCode = dec.readU8();
+    if (errorCode !== ERR_OK) throw mapItemErrorCode(errorCode, "redrive");
+    return dec.readU64();
+  }
+
+  // ---------------------------------------------------------------------------
+  // Auth operations
+  // ---------------------------------------------------------------------------
+
+  /** Create an API key. */
+  async createApiKey(
+    name: string,
+    opts?: { expiresAtMs?: bigint; isSuperadmin?: boolean }
+  ): Promise<{ keyId: string; key: string; isSuperadmin: boolean }> {
+    const enc = new Encoder(64);
+    enc.writeString(name);
+    enc.writeU64(opts?.expiresAtMs ?? BigInt(0));
+    enc.writeBool(opts?.isSuperadmin ?? false);
+
+    const resp = await this.conn.sendRequest(OP_CREATE_API_KEY, enc.finish());
+    assertNotError(resp);
+    if (resp.opcode !== OP_CREATE_API_KEY_RESULT) {
+      throw new ProtocolError(0xff, `unexpected opcode: 0x${resp.opcode.toString(16)}`);
+    }
+
+    const dec = new Decoder(resp.payload);
+    const errorCode = dec.readU8();
+    if (errorCode !== ERR_OK) throw mapItemErrorCode(errorCode, "createApiKey");
+    const keyId = dec.readString();
+    const key = dec.readString();
+    const isSuperadmin = dec.readBool();
+    return { keyId, key, isSuperadmin };
+  }
+
+  /** Revoke an API key. */
+  async revokeApiKey(keyId: string): Promise<void> {
+    const enc = new Encoder(64);
+    enc.writeString(keyId);
+
+    const resp = await this.conn.sendRequest(OP_REVOKE_API_KEY, enc.finish());
+    assertNotError(resp);
+    if (resp.opcode !== OP_REVOKE_API_KEY_RESULT) {
+      throw new ProtocolError(0xff, `unexpected opcode: 0x${resp.opcode.toString(16)}`);
+    }
+
+    const dec = new Decoder(resp.payload);
+    const errorCode = dec.readU8();
+    if (errorCode !== ERR_OK) throw mapItemErrorCode(errorCode, "revokeApiKey");
+  }
+
+  /** List all API keys. */
+  async listApiKeys(): Promise<ApiKeyInfo[]> {
+    const resp = await this.conn.sendRequest(OP_LIST_API_KEYS, Buffer.alloc(0));
+    assertNotError(resp);
+    if (resp.opcode !== OP_LIST_API_KEYS_RESULT) {
+      throw new ProtocolError(0xff, `unexpected opcode: 0x${resp.opcode.toString(16)}`);
+    }
+
+    const dec = new Decoder(resp.payload);
+    const errorCode = dec.readU8();
+    if (errorCode !== ERR_OK) throw mapItemErrorCode(errorCode, "listApiKeys");
+
+    const count = dec.readU16();
+    const keys: ApiKeyInfo[] = [];
+    for (let i = 0; i < count; i++) {
+      keys.push({
+        keyId: dec.readString(),
+        name: dec.readString(),
+        createdAtMs: dec.readU64(),
+        expiresAtMs: dec.readU64(),
+        isSuperadmin: dec.readBool(),
+      });
+    }
+    return keys;
+  }
+
+  /** Set ACL permissions for an API key. */
+  async setAcl(keyId: string, permissions: AclPermission[]): Promise<void> {
+    const enc = new Encoder(128);
+    enc.writeString(keyId);
+    enc.writeU16(permissions.length);
+    for (const p of permissions) {
+      enc.writeString(p.kind);
+      enc.writeString(p.pattern);
+    }
+
+    const resp = await this.conn.sendRequest(OP_SET_ACL, enc.finish());
+    assertNotError(resp);
+    if (resp.opcode !== OP_SET_ACL_RESULT) {
+      throw new ProtocolError(0xff, `unexpected opcode: 0x${resp.opcode.toString(16)}`);
+    }
+
+    const dec = new Decoder(resp.payload);
+    const errorCode = dec.readU8();
+    if (errorCode !== ERR_OK) throw mapItemErrorCode(errorCode, "setAcl");
+  }
+
+  /** Get ACL permissions for an API key. */
+  async getAcl(keyId: string): Promise<{
+    keyId: string;
+    isSuperadmin: boolean;
+    permissions: AclPermission[];
+  }> {
+    const enc = new Encoder(64);
+    enc.writeString(keyId);
+
+    const resp = await this.conn.sendRequest(OP_GET_ACL, enc.finish());
+    assertNotError(resp);
+    if (resp.opcode !== OP_GET_ACL_RESULT) {
+      throw new ProtocolError(0xff, `unexpected opcode: 0x${resp.opcode.toString(16)}`);
+    }
+
+    const dec = new Decoder(resp.payload);
+    const errorCode = dec.readU8();
+    if (errorCode !== ERR_OK) throw mapItemErrorCode(errorCode, "getAcl");
+
+    const resultKeyId = dec.readString();
+    const isSuperadmin = dec.readBool();
+    const permCount = dec.readU16();
+    const permissions: AclPermission[] = [];
+    for (let i = 0; i < permCount; i++) {
+      permissions.push({
+        kind: dec.readString(),
+        pattern: dec.readString(),
+      });
+    }
+    return { keyId: resultKeyId, isSuperadmin, permissions };
+  }
+}
+
+// ---------------------------------------------------------------------------
+// Helpers
+// ---------------------------------------------------------------------------
+
+function decodeDelivery(frame: Frame): ConsumeMessage[] {
+  const dec = new Decoder(frame.payload);
+  const count = dec.readU32();
+  const messages: ConsumeMessage[] = [];
+
+  for (let i = 0; i < count; i++) {
+    const id = 
dec.readString(); + const queue = dec.readString(); + const headers = dec.readMap(); + const payload = dec.readBytes(); + const fairnessKey = dec.readString(); + const weight = dec.readU32(); + const throttleKeys = dec.readStringArray(); + const attemptCount = dec.readU32(); + const enqueuedAt = dec.readU64(); + const leasedAt = dec.readU64(); + + messages.push({ + id, + queue, + headers, + payload: Buffer.from(payload), + fairnessKey, + weight, + throttleKeys, + attemptCount, + enqueuedAt, + leasedAt, }); } + + return messages; } diff --git a/src/connection.ts b/src/connection.ts new file mode 100644 index 0000000..e78ba95 --- /dev/null +++ b/src/connection.ts @@ -0,0 +1,365 @@ +/** + * FIBP TCP connection manager. + * + * Handles: + * - TCP + optional TLS connection + * - Handshake exchange + * - Request/response multiplexing via request IDs + * - Server-push Delivery frame routing + * - Ping/Pong keepalive + */ + +import * as net from "net"; +import * as tls from "tls"; +import { EventEmitter } from "events"; + +import { + Encoder, + Decoder, + FrameReader, + encodeFrame, + type Frame, + PROTOCOL_VERSION, + DEFAULT_MAX_FRAME_SIZE, + OP_HANDSHAKE, + OP_HANDSHAKE_OK, + OP_PING, + OP_PONG, + OP_DISCONNECT, + OP_DELIVERY, + OP_ERROR, +} from "./fibp"; + +export interface ConnectionOptions { + /** Enable TLS using OS trust store. */ + tls?: boolean; + /** CA certificate PEM for server verification. */ + caCert?: Buffer; + /** Client certificate PEM for mTLS. */ + clientCert?: Buffer; + /** Client private key PEM for mTLS. */ + clientKey?: Buffer; + /** API key for authentication (sent in handshake). */ + apiKey?: string; + /** Keepalive ping interval in ms (0 = disabled). Default: 15000. */ + pingIntervalMs?: number; +} + +interface PendingRequest { + resolve: (frame: Frame) => void; + reject: (err: Error) => void; +} + +/** + * A single multiplexed FIBP connection. + * + * - `sendRequest()` sends a frame and returns a promise for the response. 
+ * - Delivery frames are emitted via the `delivery` event.
+ * - ConsumeOk frames are routed to the pending request for that request ID.
+ */
+export class Connection extends EventEmitter {
+  private socket: net.Socket | tls.TLSSocket | null = null;
+  private frameReader = new FrameReader();
+  private nextRequestId = 1;
+  private pending = new Map<number, PendingRequest>();
+  /** Callbacks for consume streams: requestId -> delivery handler. */
+  private consumeHandlers = new Map<number, (frame: Frame) => void>();
+  private maxFrameSize = DEFAULT_MAX_FRAME_SIZE;
+  private closed = false;
+  private pingTimer: ReturnType<typeof setInterval> | null = null;
+  private connected = false;
+
+  /** Server node ID from handshake. */
+  nodeId = BigInt(0);
+
+  constructor(
+    private readonly host: string,
+    private readonly port: number,
+    private readonly opts: ConnectionOptions = {}
+  ) {
+    super();
+  }
+
+  /** Connect, perform TLS (if configured), and complete the FIBP handshake. */
+  async connect(): Promise<void> {
+    await this.openSocket();
+    await this.handshake();
+    this.connected = true;
+    this.startPing();
+  }
+
+  private openSocket(): Promise<void> {
+    return new Promise((resolve, reject) => {
+      const useTls = !!this.opts.tls || !!this.opts.caCert;
+
+      if (useTls) {
+        const tlsOpts: tls.ConnectionOptions = {
+          host: this.host,
+          port: this.port,
+          ca: this.opts.caCert ?? undefined,
+          cert: this.opts.clientCert ?? undefined,
+          key: this.opts.clientKey ?? undefined,
+          // When no caCert is provided, use system trust store (default). 
+ rejectUnauthorized: true, + }; + const sock = tls.connect(tlsOpts, () => { + resolve(); + }); + sock.on("error", (err) => { + if (!this.connected) { + reject(err); + } else { + this.handleSocketError(err); + } + }); + this.setupSocket(sock); + } else { + const sock = net.createConnection({ host: this.host, port: this.port }, () => { + resolve(); + }); + sock.on("error", (err) => { + if (!this.connected) { + reject(err); + } else { + this.handleSocketError(err); + } + }); + this.setupSocket(sock); + } + }); + } + + private setupSocket(sock: net.Socket | tls.TLSSocket): void { + this.socket = sock; + sock.on("data", (data: Buffer) => this.onData(data)); + sock.on("close", () => this.onClose()); + } + + private handleSocketError(err: Error): void { + // Reject all pending requests. + for (const [, req] of this.pending) { + req.reject(err); + } + this.pending.clear(); + this.emit("error", err); + } + + private onClose(): void { + this.closed = true; + this.stopPing(); + // Reject all pending. + const err = new Error("connection closed"); + for (const [, req] of this.pending) { + req.reject(err); + } + this.pending.clear(); + // Signal consume handlers. + for (const [, handler] of this.consumeHandlers) { + handler({ opcode: 0, flags: 0, requestId: 0, payload: Buffer.alloc(0) }); + } + this.consumeHandlers.clear(); + this.emit("close"); + } + + private onData(data: Buffer): void { + const frames = this.frameReader.feed(data); + for (const frame of frames) { + this.dispatch(frame); + } + } + + private dispatch(frame: Frame): void { + const { opcode, requestId } = frame; + + // Pong response to server ping. + if (opcode === OP_PING) { + this.sendRaw(encodeFrame(OP_PONG, 0, requestId, Buffer.alloc(0))); + return; + } + + // Ignore pong (we don't track ping responses for now). + if (opcode === OP_PONG) return; + + // Disconnect from server. + if (opcode === OP_DISCONNECT) { + this.close(); + return; + } + + // Delivery frames go to consume handlers. 
+    if (opcode === OP_DELIVERY) {
+      const handler = this.consumeHandlers.get(requestId);
+      if (handler) handler(frame);
+      return;
+    }
+
+    // Error frames and result frames go to pending requests.
+    const req = this.pending.get(requestId);
+    if (req) {
+      this.pending.delete(requestId);
+      req.resolve(frame);
+      return;
+    }
+
+    // ConsumeOk also resolves the pending request, but we need to keep
+    // the consume handler registered. Check if there's a pending request first.
+    // (Already handled above since ConsumeOk goes to pending.)
+  }
+
+  private async handshake(): Promise<void> {
+    const enc = new Encoder(32);
+    enc.writeU16(PROTOCOL_VERSION);
+    // optional api_key
+    if (this.opts.apiKey) {
+      enc.writeU8(1); // present
+      enc.writeString(this.opts.apiKey);
+    } else {
+      enc.writeU8(0); // absent
+    }
+
+    const frame = encodeFrame(OP_HANDSHAKE, 0, 0, enc.finish());
+    this.sendRaw(frame);
+
+    const resp = await this.waitForFrame(0, 10000);
+
+    if (resp.opcode === OP_ERROR) {
+      const dec = new Decoder(resp.payload);
+      const errorCode = dec.readU8();
+      const message = dec.readString();
+      throw new Error(`handshake rejected: code=${errorCode} message=${message}`);
+    }
+
+    if (resp.opcode !== OP_HANDSHAKE_OK) {
+      throw new Error(`unexpected handshake response opcode: 0x${resp.opcode.toString(16)}`);
+    }
+
+    const dec = new Decoder(resp.payload);
+    dec.readU16(); // negotiated version
+    this.nodeId = dec.readU64();
+    const maxFrame = dec.readU32();
+    if (maxFrame > 0) this.maxFrameSize = maxFrame;
+  }
+
+  /** Wait for a frame with a specific request ID. */
+  private waitForFrame(requestId: number, timeoutMs: number): Promise<Frame> {
+    return new Promise((resolve, reject) => {
+      const timer = setTimeout(() => {
+        this.pending.delete(requestId);
+        reject(new Error(`timeout waiting for response to request ${requestId}`));
+      }, timeoutMs);
+
+      this.pending.set(requestId, {
+        resolve: (frame) => {
+          clearTimeout(timer);
+          resolve(frame);
+        },
+        reject: (err) => {
+          clearTimeout(timer);
+          reject(err);
+        },
+      });
+    });
+  }
+
+  /** Allocate the next request ID. Wraps at 2^32. */
+  allocRequestId(): number {
+    const id = this.nextRequestId;
+    this.nextRequestId = (this.nextRequestId + 1) & 0xffffffff;
+    if (this.nextRequestId === 0) this.nextRequestId = 1;
+    return id;
+  }
+
+  /**
+   * Send a request frame and wait for the response.
+   * The request ID is auto-assigned.
+   */
+  async sendRequest(opcode: number, payload: Buffer, timeoutMs = 30000): Promise<Frame> {
+    if (this.closed) throw new Error("connection closed");
+    const requestId = this.allocRequestId();
+    const frame = encodeFrame(opcode, 0, requestId, payload);
+    this.sendRaw(frame);
+    return this.waitForFrame(requestId, timeoutMs);
+  }
+
+  /**
+   * Send a request frame with a specific request ID and wait for the response.
+   */
+  async sendRequestWithId(opcode: number, requestId: number, payload: Buffer, timeoutMs = 30000): Promise<Frame> {
+    if (this.closed) throw new Error("connection closed");
+    const frame = encodeFrame(opcode, 0, requestId, payload);
+    this.sendRaw(frame);
+    return this.waitForFrame(requestId, timeoutMs);
+  }
+
+  /**
+   * Register a handler for Delivery frames on the given consume request ID.
+   * Returns the request ID used.
+   */
+  registerConsumeHandler(requestId: number, handler: (frame: Frame) => void): void {
+    this.consumeHandlers.set(requestId, handler);
+  }
+
+  /** Unregister a consume handler. */
+  unregisterConsumeHandler(requestId: number): void {
+    this.consumeHandlers.delete(requestId);
+  }
+
+  /** Send a raw frame without expecting a response. */
+  sendRaw(data: Buffer): void {
+    if (this.closed || !this.socket) return;
+    this.socket.write(data);
+  }
+
+  /** Send a fire-and-forget frame (e.g., CancelConsume, Disconnect). */
+  sendFireAndForget(opcode: number, requestId: number, payload: Buffer): void {
+    if (this.closed) return;
+    this.sendRaw(encodeFrame(opcode, 0, requestId, payload));
+  }
+
+  private startPing(): void {
+    const interval = this.opts.pingIntervalMs ?? 15000;
+    if (interval <= 0) return;
+    this.pingTimer = setInterval(() => {
+      if (this.closed) return;
+      const requestId = this.allocRequestId();
+      this.sendRaw(encodeFrame(OP_PING, 0, requestId, Buffer.alloc(0)));
+    }, interval);
+    // Don't prevent process exit.
+    if (this.pingTimer.unref) this.pingTimer.unref();
+  }
+
+  private stopPing(): void {
+    if (this.pingTimer) {
+      clearInterval(this.pingTimer);
+      this.pingTimer = null;
+    }
+  }
+
+  /** Gracefully close the connection. */
+  async close(): Promise<void> {
+    if (this.closed) return;
+    this.closed = true;
+    this.stopPing();
+    try {
+      this.sendRaw(encodeFrame(OP_DISCONNECT, 0, 0, Buffer.alloc(0)));
+    } catch {
+      // Ignore write errors on close.
+    }
+    this.socket?.destroy();
+    this.socket = null;
+    // Reject all pending. 
+ const err = new Error("connection closed"); + for (const [, req] of this.pending) { + req.reject(err); + } + this.pending.clear(); + for (const [, handler] of this.consumeHandlers) { + handler({ opcode: 0, flags: 0, requestId: 0, payload: Buffer.alloc(0) }); + } + this.consumeHandlers.clear(); + } + + get isClosed(): boolean { + return this.closed; + } +} diff --git a/src/errors.ts b/src/errors.ts index 12347a3..ad91344 100644 --- a/src/errors.ts +++ b/src/errors.ts @@ -1,3 +1,24 @@ +import { + ERR_QUEUE_NOT_FOUND, + ERR_MESSAGE_NOT_FOUND, + ERR_QUEUE_ALREADY_EXISTS, + ERR_LUA_COMPILATION_ERROR, + ERR_STORAGE_ERROR, + ERR_NOT_A_DLQ, + ERR_PARENT_QUEUE_NOT_FOUND, + ERR_INVALID_CONFIG_VALUE, + ERR_CHANNEL_FULL, + ERR_UNAUTHORIZED, + ERR_FORBIDDEN, + ERR_NOT_LEADER, + ERR_UNSUPPORTED_VERSION, + ERR_INVALID_FRAME, + ERR_API_KEY_NOT_FOUND, + ERR_NODE_NOT_READY, + ERR_INTERNAL_ERROR, + ERROR_CODE_NAMES, +} from "./fibp"; + /** Base error for all Fila SDK errors. */ export class FilaError extends Error { constructor(message: string) { @@ -22,13 +43,121 @@ export class MessageNotFoundError extends FilaError { } } -/** Raised for unexpected gRPC failures, preserving status code and message. */ -export class RPCError extends FilaError { +/** Raised when attempting to create a queue that already exists. */ +export class QueueAlreadyExistsError extends FilaError { + constructor(message: string) { + super(message); + this.name = "QueueAlreadyExistsError"; + } +} + +/** Raised when missing or invalid API key. */ +export class UnauthorizedError extends FilaError { + constructor(message: string) { + super(message); + this.name = "UnauthorizedError"; + } +} + +/** Raised when the API key lacks required permissions. */ +export class ForbiddenError extends FilaError { + constructor(message: string) { + super(message); + this.name = "ForbiddenError"; + } +} + +/** Raised when the contacted node is not the leader for the queue. 
*/
+export class NotLeaderError extends FilaError {
+  /** Address of the current leader, if provided by the server. */
+  public readonly leaderAddr?: string;
+
+  constructor(message: string, leaderAddr?: string) {
+    super(message);
+    this.name = "NotLeaderError";
+    this.leaderAddr = leaderAddr;
+  }
+}
+
+/** Raised when the API key ID is not found. */
+export class ApiKeyNotFoundError extends FilaError {
+  constructor(message: string) {
+    super(message);
+    this.name = "ApiKeyNotFoundError";
+  }
+}
+
+/** Raised when the server is overloaded. */
+export class ChannelFullError extends FilaError {
+  public readonly retryAfterMs?: number;
+
+  constructor(message: string, retryAfterMs?: number) {
+    super(message);
+    this.name = "ChannelFullError";
+    this.retryAfterMs = retryAfterMs;
+  }
+}
+
+/** Raised for protocol-level errors with an error code. */
+export class ProtocolError extends FilaError {
   public readonly code: number;
+  public readonly metadata: Record<string, string>;
 
-  constructor(code: number, message: string) {
-    super(`rpc error (code = ${code}): ${message}`);
-    this.name = "RPCError";
+  constructor(code: number, message: string, metadata: Record<string, string> = {}) {
+    super(`protocol error (${ERROR_CODE_NAMES[code] ?? `0x${code.toString(16)}`}): ${message}`);
+    this.name = "ProtocolError";
     this.code = code;
+    this.metadata = metadata;
   }
 }
+
+/**
+ * Map a FIBP error code + message + metadata into the appropriate SDK error type.
+ */
+export function mapErrorCode(
+  code: number,
+  message: string,
+  metadata: Record<string, string> = {}
+): FilaError {
+  switch (code) {
+    case ERR_QUEUE_NOT_FOUND:
+      return new QueueNotFoundError(message);
+    case ERR_MESSAGE_NOT_FOUND:
+      return new MessageNotFoundError(message);
+    case ERR_QUEUE_ALREADY_EXISTS:
+      return new QueueAlreadyExistsError(message);
+    case ERR_UNAUTHORIZED:
+      return new UnauthorizedError(message);
+    case ERR_FORBIDDEN:
+      return new ForbiddenError(message);
+    case ERR_NOT_LEADER:
+      return new NotLeaderError(message, metadata["leader_addr"]);
+    case ERR_API_KEY_NOT_FOUND:
+      return new ApiKeyNotFoundError(message);
+    case ERR_CHANNEL_FULL: {
+      const retryAfter = metadata["retry_after_ms"];
+      return new ChannelFullError(message, retryAfter ? parseInt(retryAfter, 10) : undefined);
+    }
+    case ERR_LUA_COMPILATION_ERROR:
+    case ERR_STORAGE_ERROR:
+    case ERR_NOT_A_DLQ:
+    case ERR_PARENT_QUEUE_NOT_FOUND:
+    case ERR_INVALID_CONFIG_VALUE:
+    case ERR_UNSUPPORTED_VERSION:
+    case ERR_INVALID_FRAME:
+    case ERR_NODE_NOT_READY:
+    case ERR_INTERNAL_ERROR:
+      return new ProtocolError(code, message, metadata);
+    default:
+      return new ProtocolError(code, message, metadata);
+  }
+}
+
+/**
+ * Map a per-item error code (from batch results) into the appropriate SDK error.
+ * Per-item results only carry a u8 error code, no message/metadata.
+ */
+export function mapItemErrorCode(code: number, context: string): FilaError {
+  const name = ERROR_CODE_NAMES[code] ?? `0x${code.toString(16)}`;
+  return mapErrorCode(code, `${context}: ${name}`);
+}
diff --git a/src/fibp/codec.ts b/src/fibp/codec.ts
new file mode 100644
index 0000000..3de389e
--- /dev/null
+++ b/src/fibp/codec.ts
@@ -0,0 +1,347 @@
+/**
+ * FIBP encoding/decoding primitives and frame-level codec.
+ *
+ * All multi-byte integers are big-endian (network byte order). 
+ */ + +import { + FRAME_HEADER_SIZE, + FRAME_LENGTH_PREFIX_SIZE, + FLAG_CONTINUATION, +} from "./constants"; + +// --------------------------------------------------------------------------- +// Encoder — writes into a growing Buffer +// --------------------------------------------------------------------------- + +export class Encoder { + private buf: Buffer; + private pos = 0; + + constructor(initialCapacity = 256) { + this.buf = Buffer.allocUnsafe(initialCapacity); + } + + /** Ensure at least `needed` bytes are available. */ + private grow(needed: number): void { + const remaining = this.buf.length - this.pos; + if (remaining >= needed) return; + let newSize = this.buf.length * 2; + while (newSize - this.pos < needed) newSize *= 2; + const next = Buffer.allocUnsafe(newSize); + this.buf.copy(next, 0, 0, this.pos); + this.buf = next; + } + + writeU8(v: number): void { + this.grow(1); + this.buf[this.pos++] = v & 0xff; + } + + writeU16(v: number): void { + this.grow(2); + this.buf.writeUInt16BE(v, this.pos); + this.pos += 2; + } + + writeU32(v: number): void { + this.grow(4); + this.buf.writeUInt32BE(v, this.pos); + this.pos += 4; + } + + writeU64(v: bigint): void { + this.grow(8); + this.buf.writeBigUInt64BE(v, this.pos); + this.pos += 8; + } + + writeI64(v: bigint): void { + this.grow(8); + this.buf.writeBigInt64BE(v, this.pos); + this.pos += 8; + } + + writeF64(v: number): void { + this.grow(8); + this.buf.writeDoubleBE(v, this.pos); + this.pos += 8; + } + + writeBool(v: boolean): void { + this.writeU8(v ? 1 : 0); + } + + /** Write a length-prefixed string (u16 length + UTF-8 bytes). */ + writeString(s: string): void { + const bytes = Buffer.from(s, "utf8"); + if (bytes.length > 0xffff) throw new Error(`string too long: ${bytes.length} bytes`); + this.writeU16(bytes.length); + this.grow(bytes.length); + bytes.copy(this.buf, this.pos); + this.pos += bytes.length; + } + + /** Write length-prefixed bytes (u32 length + raw). 
*/
+  writeBytes(b: Buffer): void {
+    this.writeU32(b.length);
+    this.grow(b.length);
+    b.copy(this.buf, this.pos);
+    this.pos += b.length;
+  }
+
+  /** Write a map: u16 count + repeated (string key, string value). */
+  writeMap(m: Record<string, string>): void {
+    const entries = Object.entries(m);
+    this.writeU16(entries.length);
+    for (const [k, v] of entries) {
+      this.writeString(k);
+      this.writeString(v);
+    }
+  }
+
+  /** Write a string[]: u16 count + repeated string. */
+  writeStringArray(arr: string[]): void {
+    this.writeU16(arr.length);
+    for (const s of arr) {
+      this.writeString(s);
+    }
+  }
+
+  /** Write optional: u8 present flag, then T if present. */
+  writeOptionalString(s: string | null | undefined): void {
+    if (s != null) {
+      this.writeU8(1);
+      this.writeString(s);
+    } else {
+      this.writeU8(0);
+    }
+  }
+
+  /** Raw bytes copy. */
+  writeRaw(b: Buffer): void {
+    this.grow(b.length);
+    b.copy(this.buf, this.pos);
+    this.pos += b.length;
+  }
+
+  /** Return the written portion as a Buffer (shared memory — do not mutate). */
+  finish(): Buffer {
+    return this.buf.subarray(0, this.pos);
+  }
+}
+
+// ---------------------------------------------------------------------------
+// Decoder — reads from a Buffer
+// ---------------------------------------------------------------------------
+
+export class Decoder {
+  private readonly buf: Buffer;
+  private pos: number;
+
+  constructor(buf: Buffer, offset = 0) {
+    this.buf = buf;
+    this.pos = offset;
+  }
+
+  get remaining(): number {
+    return this.buf.length - this.pos;
+  }
+
+  get offset(): number {
+    return this.pos;
+  }
+
+  private check(n: number): void {
+    if (this.pos + n > this.buf.length) {
+      throw new Error(`decode underflow: need ${n}, have ${this.buf.length - this.pos}`);
+    }
+  }
+
+  readU8(): number {
+    this.check(1);
+    return this.buf[this.pos++];
+  }
+
+  readU16(): number {
+    this.check(2);
+    const v = this.buf.readUInt16BE(this.pos);
+    this.pos += 2;
+    return v;
+  }
+
+  readU32(): number {
+    this.check(4);
+    const v = this.buf.readUInt32BE(this.pos);
+    this.pos += 4;
+    return v;
+  }
+
+  readU64(): bigint {
+    this.check(8);
+    const v = this.buf.readBigUInt64BE(this.pos);
+    this.pos += 8;
+    return v;
+  }
+
+  readI64(): bigint {
+    this.check(8);
+    const v = this.buf.readBigInt64BE(this.pos);
+    this.pos += 8;
+    return v;
+  }
+
+  readF64(): number {
+    this.check(8);
+    const v = this.buf.readDoubleBE(this.pos);
+    this.pos += 8;
+    return v;
+  }
+
+  readBool(): boolean {
+    return this.readU8() !== 0;
+  }
+
+  readString(): string {
+    const len = this.readU16();
+    this.check(len);
+    const s = this.buf.toString("utf8", this.pos, this.pos + len);
+    this.pos += len;
+    return s;
+  }
+
+  readBytes(): Buffer {
+    const len = this.readU32();
+    this.check(len);
+    const b = this.buf.subarray(this.pos, this.pos + len);
+    this.pos += len;
+    return b;
+  }
+
+  readMap(): Record<string, string> {
+    const count = this.readU16();
+    const m: Record<string, string> = {};
+    for (let i = 0; i < count; i++) {
+      const k = this.readString();
+      const v = this.readString();
+      m[k] = v;
+    } 
+ return m; + } + + readStringArray(): string[] { + const count = this.readU16(); + const arr: string[] = []; + for (let i = 0; i < count; i++) { + arr.push(this.readString()); + } + return arr; + } + + readOptionalString(): string | null { + const present = this.readU8(); + if (present) return this.readString(); + return null; + } +} + +// --------------------------------------------------------------------------- +// Frame structure +// --------------------------------------------------------------------------- + +export interface Frame { + opcode: number; + flags: number; + requestId: number; + payload: Buffer; +} + +/** + * Build a complete wire frame: [u32 frame_length][u8 opcode][u8 flags][u32 request_id][payload]. + */ +export function encodeFrame( + opcode: number, + flags: number, + requestId: number, + payload: Buffer +): Buffer { + const frameLen = FRAME_HEADER_SIZE + payload.length; + const wire = Buffer.allocUnsafe(FRAME_LENGTH_PREFIX_SIZE + frameLen); + wire.writeUInt32BE(frameLen, 0); + wire[4] = opcode; + wire[5] = flags; + wire.writeUInt32BE(requestId, 6); + payload.copy(wire, 10); + return wire; +} + +/** + * FrameReader accumulates bytes from a TCP stream and emits complete frames. + * Handles continuation frames by reassembling them transparently. + */ +export class FrameReader { + private buffer = Buffer.alloc(0); + /** Continuation buffers keyed by `opcode:requestId`. */ + private continuations = new Map(); + + /** + * Feed incoming data and return any complete, reassembled frames. + */ + feed(data: Buffer): Frame[] { + this.buffer = Buffer.concat([this.buffer, data]); + const frames: Frame[] = []; + + while (this.buffer.length >= FRAME_LENGTH_PREFIX_SIZE) { + const frameLen = this.buffer.readUInt32BE(0); + const totalLen = FRAME_LENGTH_PREFIX_SIZE + frameLen; + if (this.buffer.length < totalLen) break; + + if (frameLen < FRAME_HEADER_SIZE) { + // Malformed — skip this frame. 
+ this.buffer = this.buffer.subarray(totalLen); + continue; + } + + const opcode = this.buffer[4]; + const flags = this.buffer[5]; + const requestId = this.buffer.readUInt32BE(6); + const payload = this.buffer.subarray(10, totalLen); + this.buffer = this.buffer.subarray(totalLen); + + const isContinuation = (flags & FLAG_CONTINUATION) !== 0; + const key = `${opcode}:${requestId}`; + + if (isContinuation) { + // Buffer continuation chunk. + let chunks = this.continuations.get(key); + if (!chunks) { + chunks = []; + this.continuations.set(key, chunks); + } + chunks.push(Buffer.from(payload)); + } else { + // Final frame — may be standalone or the last continuation chunk. + const chunks = this.continuations.get(key); + if (chunks) { + chunks.push(Buffer.from(payload)); + this.continuations.delete(key); + frames.push({ + opcode, + flags: 0, + requestId, + payload: Buffer.concat(chunks), + }); + } else { + frames.push({ + opcode, + flags: 0, + requestId, + payload: Buffer.from(payload), + }); + } + } + } + + return frames; + } +} diff --git a/src/fibp/constants.ts b/src/fibp/constants.ts new file mode 100644 index 0000000..4c2e883 --- /dev/null +++ b/src/fibp/constants.ts @@ -0,0 +1,109 @@ +/** FIBP protocol version. */ +export const PROTOCOL_VERSION = 1; + +/** Frame header size: opcode(1) + flags(1) + request_id(4) = 6 bytes. */ +export const FRAME_HEADER_SIZE = 6; + +/** Frame length prefix size. */ +export const FRAME_LENGTH_PREFIX_SIZE = 4; + +/** Default max frame size (16 MiB). 
*/ +export const DEFAULT_MAX_FRAME_SIZE = 16 * 1024 * 1024; + +/** Flags */ +export const FLAG_CONTINUATION = 0x01; + +// --- Opcodes --- + +// Control +export const OP_HANDSHAKE = 0x01; +export const OP_HANDSHAKE_OK = 0x02; +export const OP_PING = 0x03; +export const OP_PONG = 0x04; +export const OP_DISCONNECT = 0x05; + +// Hot-path +export const OP_ENQUEUE = 0x10; +export const OP_ENQUEUE_RESULT = 0x11; +export const OP_CONSUME = 0x12; +export const OP_CONSUME_OK = 0x13; +export const OP_DELIVERY = 0x14; +export const OP_CANCEL_CONSUME = 0x15; +export const OP_ACK = 0x16; +export const OP_ACK_RESULT = 0x17; +export const OP_NACK = 0x18; +export const OP_NACK_RESULT = 0x19; + +// Error +export const OP_ERROR = 0xfe; + +// Admin +export const OP_CREATE_QUEUE = 0xfd; +export const OP_CREATE_QUEUE_RESULT = 0xfc; +export const OP_DELETE_QUEUE = 0xfb; +export const OP_DELETE_QUEUE_RESULT = 0xfa; +export const OP_GET_STATS = 0xf9; +export const OP_GET_STATS_RESULT = 0xf8; +export const OP_LIST_QUEUES = 0xf7; +export const OP_LIST_QUEUES_RESULT = 0xf6; +export const OP_SET_CONFIG = 0xf5; +export const OP_SET_CONFIG_RESULT = 0xf4; +export const OP_GET_CONFIG = 0xf3; +export const OP_GET_CONFIG_RESULT = 0xf2; +export const OP_LIST_CONFIG = 0xf1; +export const OP_LIST_CONFIG_RESULT = 0xf0; +export const OP_REDRIVE = 0xef; +export const OP_REDRIVE_RESULT = 0xee; +export const OP_CREATE_API_KEY = 0xed; +export const OP_CREATE_API_KEY_RESULT = 0xec; +export const OP_REVOKE_API_KEY = 0xeb; +export const OP_REVOKE_API_KEY_RESULT = 0xea; +export const OP_LIST_API_KEYS = 0xe9; +export const OP_LIST_API_KEYS_RESULT = 0xe8; +export const OP_SET_ACL = 0xe7; +export const OP_SET_ACL_RESULT = 0xe6; +export const OP_GET_ACL = 0xe5; +export const OP_GET_ACL_RESULT = 0xe4; + +// --- Error Codes --- + +export const ERR_OK = 0x00; +export const ERR_QUEUE_NOT_FOUND = 0x01; +export const ERR_MESSAGE_NOT_FOUND = 0x02; +export const ERR_QUEUE_ALREADY_EXISTS = 0x03; +export const 
ERR_LUA_COMPILATION_ERROR = 0x04;
+export const ERR_STORAGE_ERROR = 0x05;
+export const ERR_NOT_A_DLQ = 0x06;
+export const ERR_PARENT_QUEUE_NOT_FOUND = 0x07;
+export const ERR_INVALID_CONFIG_VALUE = 0x08;
+export const ERR_CHANNEL_FULL = 0x09;
+export const ERR_UNAUTHORIZED = 0x0a;
+export const ERR_FORBIDDEN = 0x0b;
+export const ERR_NOT_LEADER = 0x0c;
+export const ERR_UNSUPPORTED_VERSION = 0x0d;
+export const ERR_INVALID_FRAME = 0x0e;
+export const ERR_API_KEY_NOT_FOUND = 0x0f;
+export const ERR_NODE_NOT_READY = 0x10;
+export const ERR_INTERNAL_ERROR = 0xff;
+
+/** Map error code to human-readable name. */
+export const ERROR_CODE_NAMES: Record<number, string> = {
+  [ERR_OK]: "Ok",
+  [ERR_QUEUE_NOT_FOUND]: "QueueNotFound",
+  [ERR_MESSAGE_NOT_FOUND]: "MessageNotFound",
+  [ERR_QUEUE_ALREADY_EXISTS]: "QueueAlreadyExists",
+  [ERR_LUA_COMPILATION_ERROR]: "LuaCompilationError",
+  [ERR_STORAGE_ERROR]: "StorageError",
+  [ERR_NOT_A_DLQ]: "NotADLQ",
+  [ERR_PARENT_QUEUE_NOT_FOUND]: "ParentQueueNotFound",
+  [ERR_INVALID_CONFIG_VALUE]: "InvalidConfigValue",
+  [ERR_CHANNEL_FULL]: "ChannelFull",
+  [ERR_UNAUTHORIZED]: "Unauthorized",
+  [ERR_FORBIDDEN]: "Forbidden",
+  [ERR_NOT_LEADER]: "NotLeader",
+  [ERR_UNSUPPORTED_VERSION]: "UnsupportedVersion",
+  [ERR_INVALID_FRAME]: "InvalidFrame",
+  [ERR_API_KEY_NOT_FOUND]: "ApiKeyNotFound",
+  [ERR_NODE_NOT_READY]: "NodeNotReady",
+  [ERR_INTERNAL_ERROR]: "InternalError",
+};
diff --git a/src/fibp/index.ts b/src/fibp/index.ts
new file mode 100644
index 0000000..bb95ccb
--- /dev/null
+++ b/src/fibp/index.ts
@@ -0,0 +1,3 @@
+export { Encoder, Decoder, FrameReader, encodeFrame } from "./codec";
+export type { Frame } from "./codec";
+export * from "./constants";
diff --git a/src/index.ts b/src/index.ts
index 9f479a0..bb22992 100644
--- a/src/index.ts
+++ b/src/index.ts
@@ -1,9 +1,23 @@
 export { Client } from "./client";
 export type { ClientOptions } from "./client";
-export type { ConsumeMessage, EnqueueMessage, EnqueueResult } from "./types";
+export type { 
+ ConsumeMessage, + EnqueueMessage, + EnqueueResult, + QueueStats, + QueueInfo, + ApiKeyInfo, + AclPermission, +} from "./types"; export { FilaError, QueueNotFoundError, MessageNotFoundError, - RPCError, + QueueAlreadyExistsError, + UnauthorizedError, + ForbiddenError, + NotLeaderError, + ApiKeyNotFoundError, + ChannelFullError, + ProtocolError, } from "./errors"; diff --git a/src/types.ts b/src/types.ts index 99706b8..32e2890 100644 --- a/src/types.ts +++ b/src/types.ts @@ -12,6 +12,14 @@ export interface ConsumeMessage { attemptCount: number; /** Queue the message belongs to. */ queue: string; + /** Message weight. */ + weight: number; + /** Throttle keys. */ + throttleKeys: string[]; + /** Enqueued-at timestamp (Unix ms). */ + enqueuedAt: bigint; + /** Leased-at timestamp (Unix ms, 0 if unavailable). */ + leasedAt: bigint; } /** A single message specification for enqueue operations. */ @@ -28,3 +36,50 @@ export interface EnqueueMessage { export type EnqueueResult = | { success: true; messageId: string } | { success: false; error: string }; + +/** Queue statistics returned by getStats(). */ +export interface QueueStats { + depth: bigint; + inFlight: bigint; + activeFairnessKeys: bigint; + activeConsumers: number; + quantum: number; + leaderNodeId: bigint; + replicationCount: number; + perKeyStats: Array<{ + key: string; + pendingCount: bigint; + currentDeficit: bigint; + weight: number; + }>; + perThrottleStats: Array<{ + key: string; + tokens: number; + ratePerSecond: number; + burst: number; + }>; +} + +/** Queue info returned by listQueues(). */ +export interface QueueInfo { + name: string; + depth: bigint; + inFlight: bigint; + activeConsumers: number; + leaderNodeId: bigint; +} + +/** API key info returned by listApiKeys(). */ +export interface ApiKeyInfo { + keyId: string; + name: string; + createdAtMs: bigint; + expiresAtMs: bigint; + isSuperadmin: boolean; +} + +/** ACL permission. 
*/ +export interface AclPermission { + kind: string; + pattern: string; +} diff --git a/test/auth.test.ts b/test/auth.test.ts index 8ee6732..07f3fe1 100644 --- a/test/auth.test.ts +++ b/test/auth.test.ts @@ -2,9 +2,7 @@ import { describe, it, expect, beforeAll, afterAll } from "vitest"; import * as fs from "fs"; import * as os from "os"; import * as path from "path"; -import * as grpc from "@grpc/grpc-js"; import { Client } from "../src"; -import { RPCError } from "../src/errors"; import { startTestServer, generateTestCerts, @@ -31,6 +29,7 @@ describe.skipIf(!FILA_SERVER_AVAILABLE)("TLS + API key auth", () => { it("enqueue succeeds with valid API key", async () => { await server.createQueue("auth-test-ok"); const client = new Client(server.addr, { apiKey: BOOTSTRAP_KEY }); + await client.connect(); try { const msgId = await client.enqueue( "auth-test-ok", @@ -43,37 +42,21 @@ describe.skipIf(!FILA_SERVER_AVAILABLE)("TLS + API key auth", () => { } }); - it("enqueue fails without API key (unauthenticated)", async () => { - await server.createQueue("auth-test-nokey"); + it("connect fails without API key (unauthenticated)", async () => { const client = new Client(server.addr); - try { - await expect( - client.enqueue("auth-test-nokey", null, Buffer.from("fail")) - ).rejects.toSatisfy((err: unknown) => { - expect(err).toBeInstanceOf(RPCError); - expect((err as RPCError).code).toBe(grpc.status.UNAUTHENTICATED); - return true; - }); - } finally { - await client.close(); - } + // The handshake should be rejected without a valid API key. 
+ await expect(client.connect()).rejects.toThrow(); }); - it("enqueue fails with wrong API key", async () => { - await server.createQueue("auth-test-badkey"); + it("connect fails with wrong API key", async () => { const client = new Client(server.addr, { apiKey: "wrong-key" }); - try { - await expect( - client.enqueue("auth-test-badkey", null, Buffer.from("fail")) - ).rejects.toThrow(RPCError); - } finally { - await client.close(); - } + await expect(client.connect()).rejects.toThrow(); }); it("consume works with valid API key", async () => { await server.createQueue("auth-consume"); const client = new Client(server.addr, { apiKey: BOOTSTRAP_KEY }); + await client.connect(); try { await client.enqueue("auth-consume", null, Buffer.from("msg")); @@ -98,19 +81,16 @@ describe.skipIf(!FILA_SERVER_AVAILABLE)("TLS + API key auth", () => { const certDir = fs.mkdtempSync(path.join(os.tmpdir(), "fila-tls-test-")); certs = generateTestCerts(certDir); - // Write server certs to a temp dir, then reference from config. 
const serverCertPath = path.join(certDir, "server.pem"); const serverKeyPath = path.join(certDir, "server.key"); - const adminCreds = grpc.credentials.createSsl(certs.caCert); - server = await startTestServer({ extraConfig: [ `[tls]`, `cert_file = "${serverCertPath}"`, `key_file = "${serverKeyPath}"`, ].join("\n"), - adminCreds, + adminTls: { caCert: certs.caCert }, }); }, 30_000); @@ -123,6 +103,7 @@ describe.skipIf(!FILA_SERVER_AVAILABLE)("TLS + API key auth", () => { const client = new Client(server.addr, { caCert: certs.caCert, }); + await client.connect(); try { const msgId = await client.enqueue( "tls-test-ok", @@ -138,13 +119,7 @@ describe.skipIf(!FILA_SERVER_AVAILABLE)("TLS + API key auth", () => { it("fails without CA cert (insecure against TLS server)", async () => { await server.createQueue("tls-test-insecure"); const client = new Client(server.addr); - try { - await expect( - client.enqueue("tls-test-insecure", null, Buffer.from("fail")) - ).rejects.toThrow(); - } finally { - await client.close(); - } + await expect(client.connect()).rejects.toThrow(); }); }); @@ -159,12 +134,6 @@ describe.skipIf(!FILA_SERVER_AVAILABLE)("TLS + API key auth", () => { const serverKeyPath = path.join(certDir, "server.key"); const caCertPath = path.join(certDir, "ca.pem"); - const adminCreds = grpc.credentials.createSsl( - certs.caCert, - certs.clientKey, - certs.clientCert - ); - server = await startTestServer({ extraConfig: [ `[tls]`, @@ -174,7 +143,11 @@ describe.skipIf(!FILA_SERVER_AVAILABLE)("TLS + API key auth", () => { `[auth]`, `bootstrap_apikey = "${BOOTSTRAP_KEY}"`, ].join("\n"), - adminCreds, + adminTls: { + caCert: certs.caCert, + clientCert: certs.clientCert, + clientKey: certs.clientKey, + }, adminApiKey: BOOTSTRAP_KEY, }); }, 30_000); @@ -191,6 +164,7 @@ describe.skipIf(!FILA_SERVER_AVAILABLE)("TLS + API key auth", () => { clientKey: certs.clientKey, apiKey: BOOTSTRAP_KEY, }); + await client.connect(); try { const msgId = await client.enqueue( "mtls-auth-ok", 
@@ -211,6 +185,7 @@ describe.skipIf(!FILA_SERVER_AVAILABLE)("TLS + API key auth", () => { clientKey: certs.clientKey, apiKey: BOOTSTRAP_KEY, }); + await client.connect(); try { const msgId = await client.enqueue( "mtls-full-flow", diff --git a/test/batch.test.ts b/test/batch.test.ts index 40ea0e2..22751de 100644 --- a/test/batch.test.ts +++ b/test/batch.test.ts @@ -6,6 +6,7 @@ import { FILA_SERVER_AVAILABLE, type TestServer, } from "./helpers"; +import type { ClientOptions } from "../src/client"; describe.skipIf(!FILA_SERVER_AVAILABLE)("Enqueue operations", () => { let server: TestServer; @@ -19,10 +20,11 @@ describe.skipIf(!FILA_SERVER_AVAILABLE)("Enqueue operations", () => { }); describe("enqueueMany", () => { - it("enqueues multiple messages in a single RPC", async () => { + it("enqueues multiple messages in a single request", async () => { await server.createQueue("multi-enqueue"); const client = new Client(server.addr, { batchMode: "disabled" }); + await client.connect(); try { const results = await client.enqueueMany([ { queue: "multi-enqueue", headers: { idx: "0" }, payload: Buffer.from("msg-0") }, @@ -59,6 +61,7 @@ describe.skipIf(!FILA_SERVER_AVAILABLE)("Enqueue operations", () => { await server.createQueue("multi-partial"); const client = new Client(server.addr, { batchMode: "disabled" }); + await client.connect(); try { const results = await client.enqueueMany([ { queue: "multi-partial", headers: {}, payload: Buffer.from("ok") }, @@ -80,6 +83,7 @@ describe.skipIf(!FILA_SERVER_AVAILABLE)("Enqueue operations", () => { await server.createQueue("multi-order"); const client = new Client(server.addr, { batchMode: "disabled" }); + await client.connect(); try { const results = await client.enqueueMany([ { queue: "multi-order", headers: {}, payload: Buffer.from("first") }, @@ -103,6 +107,7 @@ describe.skipIf(!FILA_SERVER_AVAILABLE)("Enqueue operations", () => { await server.createQueue("auto-batch"); const client = new Client(server.addr); + await 
client.connect(); try { const msgId = await client.enqueue( "auto-batch", @@ -130,9 +135,8 @@ describe.skipIf(!FILA_SERVER_AVAILABLE)("Enqueue operations", () => { await server.createQueue("auto-concurrent"); const client = new Client(server.addr); + await client.connect(); try { - // Fire multiple enqueues concurrently — they should batch together - // since they arrive within the same event loop turn. const promises = Array.from({ length: 5 }, (_, i) => client.enqueue( "auto-concurrent", @@ -146,7 +150,6 @@ describe.skipIf(!FILA_SERVER_AVAILABLE)("Enqueue operations", () => { for (const id of messageIds) { expect(id).toBeTruthy(); } - // All IDs should be unique. expect(new Set(messageIds).size).toBe(5); } finally { await client.close(); @@ -155,9 +158,8 @@ describe.skipIf(!FILA_SERVER_AVAILABLE)("Enqueue operations", () => { it("preserves QueueNotFoundError for single-item batches", async () => { const client = new Client(server.addr); + await client.connect(); try { - // Single message to nonexistent queue: should get QueueNotFoundError - // because the per-result error code is mapped to QueueNotFoundError. 
await expect( client.enqueue("no-such-queue-auto", null, Buffer.from("fail")) ).rejects.toThrow(QueueNotFoundError); @@ -172,6 +174,7 @@ describe.skipIf(!FILA_SERVER_AVAILABLE)("Enqueue operations", () => { await server.createQueue("no-batch"); const client = new Client(server.addr, { batchMode: "disabled" }); + await client.connect(); try { const msgId = await client.enqueue( "no-batch", @@ -196,6 +199,7 @@ describe.skipIf(!FILA_SERVER_AVAILABLE)("Enqueue operations", () => { it("enqueue to nonexistent queue throws QueueNotFoundError", async () => { const client = new Client(server.addr, { batchMode: "disabled" }); + await client.connect(); try { await expect( client.enqueue("no-such-queue-disabled", null, Buffer.from("fail")) @@ -215,6 +219,7 @@ describe.skipIf(!FILA_SERVER_AVAILABLE)("Enqueue operations", () => { lingerMs: 50, batchSize: 100, }); + await client.connect(); try { const msgId = await client.enqueue( "linger-batch", @@ -232,9 +237,10 @@ describe.skipIf(!FILA_SERVER_AVAILABLE)("Enqueue operations", () => { const client = new Client(server.addr, { batchMode: "linger", - lingerMs: 5000, // Long timer — batch should flush by size first. + lingerMs: 5000, batchSize: 3, }); + await client.connect(); try { const promises = Array.from({ length: 3 }, (_, i) => client.enqueue( @@ -266,23 +272,22 @@ describe.skipIf(!FILA_SERVER_AVAILABLE)("Enqueue operations", () => { await server.createQueue("close-drain"); const client = new Client(server.addr); + await client.connect(); - // Enqueue a message and immediately close. const enqueuePromise = client.enqueue( "close-drain", null, Buffer.from("drained") ); - // Close should wait for pending messages. await client.close(); - // The enqueue should have completed before close returned. const msgId = await enqueuePromise; expect(msgId).toBeTruthy(); // Verify the message arrived at the server. 
const verifyClient = new Client(server.addr, { batchMode: "disabled" }); + await verifyClient.connect(); try { let received = false; for await (const msg of verifyClient.consume("close-drain")) { @@ -299,6 +304,3 @@ describe.skipIf(!FILA_SERVER_AVAILABLE)("Enqueue operations", () => { }); }); }); - -// Import ClientOptions type for the constructor test. -import type { ClientOptions } from "../src/client"; diff --git a/test/batcher.unit.test.ts b/test/batcher.unit.test.ts index 20ae8e2..d5013f3 100644 --- a/test/batcher.unit.test.ts +++ b/test/batcher.unit.test.ts @@ -4,7 +4,6 @@ import { Client } from "../src"; describe("Batcher unit tests (no server)", () => { it("default batch mode is auto", () => { // Creating a client with default options should not throw. - // The batcher is initialized but won't do anything until enqueue is called. const client = new Client("localhost:9999"); // close() should succeed even without a real server (just closes channel). client.close(); @@ -44,7 +43,6 @@ describe("Batcher unit tests (no server)", () => { it("close() resolves immediately when no pending messages", async () => { const client = new Client("localhost:9999"); await client.close(); - // Should not hang. }); it("close() resolves immediately when batching is disabled", async () => { diff --git a/test/client.test.ts b/test/client.test.ts index d546b28..b154c07 100644 --- a/test/client.test.ts +++ b/test/client.test.ts @@ -22,6 +22,7 @@ describe.skipIf(!FILA_SERVER_AVAILABLE)("Client", () => { await server.createQueue("test-enqueue-ack"); const client = new Client(server.addr); + await client.connect(); try { const msgId = await client.enqueue( "test-enqueue-ack", @@ -30,7 +31,6 @@ describe.skipIf(!FILA_SERVER_AVAILABLE)("Client", () => { ); expect(msgId).toBeTruthy(); - // Consume one message then break. 
let received = false; for await (const msg of client.consume("test-enqueue-ack")) { expect(msg.id).toBe(msgId); @@ -53,6 +53,7 @@ describe.skipIf(!FILA_SERVER_AVAILABLE)("Client", () => { await server.createQueue("test-nack-redeliver"); const client = new Client(server.addr); + await client.connect(); try { await client.enqueue( "test-nack-redeliver", @@ -60,7 +61,6 @@ describe.skipIf(!FILA_SERVER_AVAILABLE)("Client", () => { Buffer.from("retry-me") ); - // Keep the same stream open — redelivery arrives on the same stream. let deliveryCount = 0; for await (const msg of client.consume("test-nack-redeliver")) { if (deliveryCount === 0) { @@ -83,6 +83,7 @@ describe.skipIf(!FILA_SERVER_AVAILABLE)("Client", () => { it("enqueue to nonexistent queue throws QueueNotFoundError", async () => { const client = new Client(server.addr); + await client.connect(); try { await expect( client.enqueue("no-such-queue", null, Buffer.from("fail")) diff --git a/test/codec.test.ts b/test/codec.test.ts new file mode 100644 index 0000000..0bd2ec4 --- /dev/null +++ b/test/codec.test.ts @@ -0,0 +1,176 @@ +import { describe, it, expect } from "vitest"; +import { Encoder, Decoder, FrameReader, encodeFrame } from "../src/fibp"; + +describe("FIBP codec", () => { + describe("Encoder/Decoder round-trips", () => { + it("u8", () => { + const enc = new Encoder(); + enc.writeU8(0); + enc.writeU8(255); + const dec = new Decoder(enc.finish()); + expect(dec.readU8()).toBe(0); + expect(dec.readU8()).toBe(255); + }); + + it("u16", () => { + const enc = new Encoder(); + enc.writeU16(0); + enc.writeU16(65535); + const dec = new Decoder(enc.finish()); + expect(dec.readU16()).toBe(0); + expect(dec.readU16()).toBe(65535); + }); + + it("u32", () => { + const enc = new Encoder(); + enc.writeU32(0); + enc.writeU32(0xffffffff); + const dec = new Decoder(enc.finish()); + expect(dec.readU32()).toBe(0); + expect(dec.readU32()).toBe(0xffffffff); + }); + + it("u64", () => { + const enc = new Encoder(); + 
enc.writeU64(BigInt(0)); + enc.writeU64(BigInt("18446744073709551615")); + const dec = new Decoder(enc.finish()); + expect(dec.readU64()).toBe(BigInt(0)); + expect(dec.readU64()).toBe(BigInt("18446744073709551615")); + }); + + it("i64", () => { + const enc = new Encoder(); + enc.writeI64(BigInt(-1)); + enc.writeI64(BigInt("9223372036854775807")); + const dec = new Decoder(enc.finish()); + expect(dec.readI64()).toBe(BigInt(-1)); + expect(dec.readI64()).toBe(BigInt("9223372036854775807")); + }); + + it("f64", () => { + const enc = new Encoder(); + enc.writeF64(3.14); + enc.writeF64(-0.0); + const dec = new Decoder(enc.finish()); + expect(dec.readF64()).toBeCloseTo(3.14); + expect(dec.readF64()).toBe(-0.0); + }); + + it("bool", () => { + const enc = new Encoder(); + enc.writeBool(true); + enc.writeBool(false); + const dec = new Decoder(enc.finish()); + expect(dec.readBool()).toBe(true); + expect(dec.readBool()).toBe(false); + }); + + it("string", () => { + const enc = new Encoder(); + enc.writeString(""); + enc.writeString("hello world"); + enc.writeString("emoji \u{1F600}"); + const dec = new Decoder(enc.finish()); + expect(dec.readString()).toBe(""); + expect(dec.readString()).toBe("hello world"); + expect(dec.readString()).toBe("emoji \u{1F600}"); + }); + + it("bytes", () => { + const enc = new Encoder(); + enc.writeBytes(Buffer.alloc(0)); + enc.writeBytes(Buffer.from([1, 2, 3])); + const dec = new Decoder(enc.finish()); + expect(dec.readBytes()).toEqual(Buffer.alloc(0)); + expect(dec.readBytes()).toEqual(Buffer.from([1, 2, 3])); + }); + + it("map", () => { + const enc = new Encoder(); + enc.writeMap({}); + enc.writeMap({ a: "1", b: "2" }); + const dec = new Decoder(enc.finish()); + expect(dec.readMap()).toEqual({}); + expect(dec.readMap()).toEqual({ a: "1", b: "2" }); + }); + + it("string array", () => { + const enc = new Encoder(); + enc.writeStringArray([]); + enc.writeStringArray(["foo", "bar"]); + const dec = new Decoder(enc.finish()); + 
expect(dec.readStringArray()).toEqual([]); + expect(dec.readStringArray()).toEqual(["foo", "bar"]); + }); + + it("optional string", () => { + const enc = new Encoder(); + enc.writeOptionalString(null); + enc.writeOptionalString("present"); + const dec = new Decoder(enc.finish()); + expect(dec.readOptionalString()).toBeNull(); + expect(dec.readOptionalString()).toBe("present"); + }); + }); + + describe("FrameReader", () => { + it("reads a single frame", () => { + const payload = Buffer.from("test"); + const wire = encodeFrame(0x10, 0, 42, payload); + + const reader = new FrameReader(); + const frames = reader.feed(wire); + expect(frames).toHaveLength(1); + expect(frames[0].opcode).toBe(0x10); + expect(frames[0].requestId).toBe(42); + expect(frames[0].payload.toString()).toBe("test"); + }); + + it("reads multiple frames from a single buffer", () => { + const f1 = encodeFrame(0x01, 0, 1, Buffer.from("a")); + const f2 = encodeFrame(0x02, 0, 2, Buffer.from("b")); + const combined = Buffer.concat([f1, f2]); + + const reader = new FrameReader(); + const frames = reader.feed(combined); + expect(frames).toHaveLength(2); + expect(frames[0].opcode).toBe(0x01); + expect(frames[1].opcode).toBe(0x02); + }); + + it("handles partial frames across multiple feeds", () => { + const wire = encodeFrame(0x10, 0, 1, Buffer.from("hello")); + + const reader = new FrameReader(); + // Feed partial data. 
+ const part1 = wire.subarray(0, 5); + const part2 = wire.subarray(5); + + expect(reader.feed(part1)).toHaveLength(0); + const frames = reader.feed(part2); + expect(frames).toHaveLength(1); + expect(frames[0].payload.toString()).toBe("hello"); + }); + + it("reassembles continuation frames", () => { + const p1 = encodeFrame(0x10, 0x01, 1, Buffer.from("hello")); + const p2 = encodeFrame(0x10, 0x00, 1, Buffer.from(" world")); + + const reader = new FrameReader(); + expect(reader.feed(p1)).toHaveLength(0); // continuation + const frames = reader.feed(p2); + expect(frames).toHaveLength(1); + expect(frames[0].payload.toString()).toBe("hello world"); + expect(frames[0].opcode).toBe(0x10); + expect(frames[0].requestId).toBe(1); + }); + }); + + describe("Decoder underflow", () => { + it("throws on underflow", () => { + const dec = new Decoder(Buffer.alloc(0)); + expect(() => dec.readU8()).toThrow("decode underflow"); + }); + }); +}); diff --git a/test/helpers.ts b/test/helpers.ts index 47e43ac..ae4e9bb 100644 --- a/test/helpers.ts +++ b/test/helpers.ts @@ -1,12 +1,10 @@ -import { spawn, execFileSync } from "child_process"; +import { spawn } from "child_process"; import * as fs from "fs"; import * as net from "net"; import * as os from "os"; import * as path from "path"; -import * as grpc from "@grpc/grpc-js"; -import * as protoLoader from "@grpc/proto-loader"; - -const PROTO_DIR = path.join(__dirname, "..", "proto"); +import { execFileSync } from "child_process"; +import { Client } from "../src"; function findServerBinary(): string { if (process.env.FILA_SERVER_BIN) { @@ -40,10 +38,14 @@ export interface TestServerOptions { extraConfig?: string; /** Environment variables to merge. */ extraEnv?: Record; - /** gRPC credentials for admin/readiness connections (default: insecure). */ - adminCreds?: grpc.ChannelCredentials; - /** API key metadata to attach to admin RPCs. */ + /** API key for admin operations (used in handshake). 
*/ adminApiKey?: string; + /** TLS options for admin connections. */ + adminTls?: { + caCert?: Buffer; + clientCert?: Buffer; + clientKey?: Buffer; + }; } export const FILA_SERVER_BIN = findServerBinary(); @@ -78,33 +80,31 @@ export async function startTestServer( stderrBuf += chunk.toString(); }); - const creds = opts?.adminCreds ?? grpc.credentials.createInsecure(); - const adminMeta = new grpc.Metadata(); - if (opts?.adminApiKey) { - adminMeta.set("authorization", `Bearer ${opts.adminApiKey}`); - } - - // Wait for server ready. 20s to accommodate TLS + startup in CI. - const deadline = Date.now() + 20000; - let ready = false; let exited = false; proc.on("exit", () => { exited = true; }); - // Create a fresh gRPC client per readiness probe. A persistent client can - // enter TRANSIENT_FAILURE with aggressive backoff after the first failed TLS - // handshake, preventing recovery. Fresh clients guarantee a clean attempt. + // Wait for server ready by attempting a listQueues via FIBP. + const deadline = Date.now() + 20000; + let ready = false; let lastErr: unknown; + while (Date.now() < deadline && !exited) { - const probeClient = createAdminClient(addr, creds); try { - await callListQueues(probeClient, adminMeta); + const probe = new Client(addr, { + apiKey: opts?.adminApiKey, + tls: !!opts?.adminTls, + caCert: opts?.adminTls?.caCert, + clientCert: opts?.adminTls?.clientCert, + clientKey: opts?.adminTls?.clientKey, + }); + await probe.connect(); + await probe.listQueues(); + await probe.close(); ready = true; break; } catch (err) { lastErr = err; await sleep(500); - } finally { - probeClient.close(); } } @@ -116,72 +116,30 @@ export async function startTestServer( throw new Error(`fila-server failed to start within 20s on ${addr}${detail}${probeDetail}`); } - const adminClient = createAdminClient(addr, creds); + // Create admin client for createQueue helper. 
+ const adminClient = new Client(addr, { + apiKey: opts?.adminApiKey, + tls: !!opts?.adminTls, + caCert: opts?.adminTls?.caCert, + clientCert: opts?.adminTls?.clientCert, + clientKey: opts?.adminTls?.clientKey, + }); + await adminClient.connect(); return { addr, dataDir, stop: () => { proc.kill(); - adminClient.close(); + adminClient.close().catch(() => {}); fs.rmSync(dataDir, { recursive: true, force: true }); }, - createQueue: (name: string) => { - return new Promise((resolve, reject) => { - adminClient.createQueue( - { name, config: {} }, - adminMeta, - (err: grpc.ServiceError | null) => { - if (err) reject(err); - else resolve(); - } - ); - }); + createQueue: async (name: string) => { + await adminClient.createQueue(name); }, }; } -function loadAdminProto(): grpc.ServiceClientConstructor { - const packageDef = protoLoader.loadSync( - [ - path.join(PROTO_DIR, "fila", "v1", "admin.proto"), - path.join(PROTO_DIR, "fila", "v1", "messages.proto"), - ], - { - keepCase: false, - longs: String, - enums: String, - defaults: true, - oneofs: true, - includeDirs: [PROTO_DIR], - } - ); - const proto = grpc.loadPackageDefinition(packageDef); - // eslint-disable-next-line @typescript-eslint/no-explicit-any - return (proto.fila as any).v1.FilaAdmin as grpc.ServiceClientConstructor; -} - -function createAdminClient( - addr: string, - creds: grpc.ChannelCredentials -): grpc.Client { - const AdminService = loadAdminProto(); - return new AdminService(addr, creds); -} - -function callListQueues( - client: grpc.Client, - metadata: grpc.Metadata -): Promise { - return new Promise((resolve, reject) => { - // eslint-disable-next-line @typescript-eslint/no-explicit-any - (client as any).listQueues({}, metadata, (err: grpc.ServiceError | null) => { - if (err) reject(err); - else resolve(); - }); - }); -} - function sleep(ms: number): Promise { return new Promise((resolve) => setTimeout(resolve, ms)); } @@ -194,8 +152,6 @@ export function generateTestCerts(outputDir: string): { 
clientCert: Buffer; clientKey: Buffer; } { - - const caKeyPath = path.join(outputDir, "ca.key"); const caCertPath = path.join(outputDir, "ca.pem"); const serverKeyPath = path.join(outputDir, "server.key"); @@ -207,19 +163,16 @@ export function generateTestCerts(outputDir: string): { const serverExtPath = path.join(outputDir, "server-ext.cnf"); const clientExtPath = path.join(outputDir, "client-ext.cnf"); - // Write server SAN extension config. fs.writeFileSync( serverExtPath, "subjectAltName=IP:127.0.0.1,DNS:localhost\nextendedKeyUsage=serverAuth\n" ); - // Write client extension config (rustls requires clientAuth EKU). fs.writeFileSync( clientExtPath, "extendedKeyUsage=clientAuth\n" ); - // CA key + cert. execFileSync( "openssl", [ @@ -230,7 +183,6 @@ export function generateTestCerts(outputDir: string): { { stdio: "ignore" } ); - // Server key + CSR + cert signed by CA. execFileSync( "openssl", [ @@ -251,7 +203,6 @@ export function generateTestCerts(outputDir: string): { { stdio: "ignore" } ); - // Client key + CSR + cert signed by CA. 
execFileSync( "openssl", [ diff --git a/tsconfig.json b/tsconfig.json index 889749a..15e1681 100644 --- a/tsconfig.json +++ b/tsconfig.json @@ -3,6 +3,7 @@ "target": "ES2022", "module": "commonjs", "lib": ["ES2022"], + "types": ["node"], "declaration": true, "strict": true, "esModuleInterop": true, @@ -13,6 +14,6 @@ "resolveJsonModule": true, "moduleResolution": "node" }, - "include": ["src/**/*", "generated/**/*"], + "include": ["src/**/*"], "exclude": ["node_modules", "dist", "test"] } diff --git a/vitest.config.ts b/vitest.config.ts index 4870abb..c3076df 100644 --- a/vitest.config.ts +++ b/vitest.config.ts @@ -3,5 +3,6 @@ import { defineConfig } from "vitest/config"; export default defineConfig({ test: { testTimeout: 30000, + hookTimeout: 30000, }, }); From c26d1d6be702cf5e406db512c9a090288f1a9257 Mon Sep 17 00:00:00 2001 From: Lucas Vieira Date: Sat, 4 Apr 2026 10:15:20 -0300 Subject: [PATCH 8/8] fix: address 13 cubic review findings - test/auth: close client in finally on connect rejection (finding 1) - test/batch: verify input ordering matches output (finding 2) - src/index: re-export ProtocolError as RPCError for backward compat (finding 3) - test/batcher.unit: await client.close() (finding 4) - test/helpers: close readiness probe in finally block (finding 5) - test/helpers: clean up server on adminClient.connect() failure (finding 6) - src/fibp/codec: prevent grow() infinite loop on zero-length buffer (finding 7) - src/connection: send disconnect frame before setting closed flag (finding 8) - src/connection: use >>> 0 to keep request IDs unsigned (finding 9) - src/client: track and close leader connections on client.close() (finding 10) - src/client: handle IPv6 [host]:port in parseAddr (finding 11) - src/client: validate ConsumeOk opcode on leader redirect (finding 12) - src/batcher: validate batch size > 0 (finding 13) --- src/batcher.ts | 4 ++++ src/client.ts | 25 ++++++++++++++++++++++++- src/connection.ts | 4 ++-- src/fibp/codec.ts | 2 +- src/index.ts 
| 1 + test/auth.test.ts | 12 ++++++++++-- test/batch.test.ts | 5 +++++ test/batcher.unit.test.ts | 8 ++++---- test/helpers.ts | 25 ++++++++++++++++--------- 9 files changed, 67 insertions(+), 19 deletions(-) diff --git a/src/batcher.ts b/src/batcher.ts index b9e590e..1559178 100644 --- a/src/batcher.ts +++ b/src/batcher.ts @@ -51,6 +51,10 @@ export class Batcher { } else { this.maxBatchSize = 1; } + + if (this.maxBatchSize <= 0) { + throw new Error(`batch size must be greater than 0, got ${this.maxBatchSize}`); + } } /** Submit a message for batched enqueue. */ diff --git a/src/client.ts b/src/client.ts index 5d53d34..0684bd3 100644 --- a/src/client.ts +++ b/src/client.ts @@ -88,8 +88,19 @@ export interface ClientOptions { batchSize?: number; } -/** Parse "host:port" into components. Default port 5555. */ +/** Parse "host:port" into components. Default port 5555. Handles IPv6 "[host]:port". */ function parseAddr(addr: string): { host: string; port: number } { + // IPv6 bracket notation: [::1]:5555 + if (addr.startsWith("[")) { + const closeBracket = addr.indexOf("]"); + if (closeBracket === -1) return { host: addr, port: 5555 }; + const host = addr.substring(1, closeBracket); + if (closeBracket + 1 < addr.length && addr[closeBracket + 1] === ":") { + const port = parseInt(addr.substring(closeBracket + 2), 10); + return { host, port: isNaN(port) ? 5555 : port }; + } + return { host, port: 5555 }; + } const lastColon = addr.lastIndexOf(":"); if (lastColon === -1) return { host: addr, port: 5555 }; const host = addr.substring(0, lastColon); @@ -132,6 +143,7 @@ export class Client { private readonly batcher: Batcher | null; private readonly batchModeConfig: "auto" | "linger" | "disabled"; private readonly clientOptions: ClientOptions; + private readonly leaderConns: Set = new Set(); /** * Create a client for the given address. Call `connect()` to establish the connection. 
@@ -196,6 +208,11 @@ export class Client { if (this.batcher) { await this.batcher.drain(); } + // Close any leader connections opened by consume redirects. + for (const lc of this.leaderConns) { + await lc.close(); + } + this.leaderConns.clear(); await this.conn.close(); } @@ -380,6 +397,7 @@ export class Client { ): AsyncIterable { const { host, port } = parseAddr(leaderAddr); const leaderConn = new Connection(host, port, this.connOpts); + this.leaderConns.add(leaderConn); try { await leaderConn.connect(); const enc = new Encoder(64); @@ -411,6 +429,10 @@ export class Client { ); assertNotError(consumeResp); + if (consumeResp.opcode !== OP_CONSUME_OK) { + throw new ProtocolError(0xff, `unexpected consume response: 0x${consumeResp.opcode.toString(16)}`); + } + while (!streamClosed && !leaderConn.isClosed) { if (deliveryQueue.length === 0) { await new Promise((resolve) => { @@ -429,6 +451,7 @@ export class Client { leaderConn.sendFireAndForget(OP_CANCEL_CONSUME, requestId, Buffer.alloc(0)); leaderConn.unregisterConsumeHandler(requestId); } finally { + this.leaderConns.delete(leaderConn); await leaderConn.close(); } } diff --git a/src/connection.ts b/src/connection.ts index e78ba95..2d6636a 100644 --- a/src/connection.ts +++ b/src/connection.ts @@ -264,7 +264,7 @@ export class Connection extends EventEmitter { /** Allocate the next request ID. Wraps at 2^32. */ allocRequestId(): number { const id = this.nextRequestId; - this.nextRequestId = (this.nextRequestId + 1) & 0xffffffff; + this.nextRequestId = ((this.nextRequestId + 1) & 0xffffffff) >>> 0; if (this.nextRequestId === 0) this.nextRequestId = 1; return id; } @@ -338,13 +338,13 @@ export class Connection extends EventEmitter { /** Gracefully close the connection. */ async close(): Promise { if (this.closed) return; - this.closed = true; this.stopPing(); try { this.sendRaw(encodeFrame(OP_DISCONNECT, 0, 0, Buffer.alloc(0))); } catch { // Ignore write errors on close. 
} + this.closed = true; this.socket?.destroy(); this.socket = null; // Reject all pending. diff --git a/src/fibp/codec.ts b/src/fibp/codec.ts index 3de389e..bb4302c 100644 --- a/src/fibp/codec.ts +++ b/src/fibp/codec.ts @@ -26,7 +26,7 @@ export class Encoder { private grow(needed: number): void { const remaining = this.buf.length - this.pos; if (remaining >= needed) return; - let newSize = this.buf.length * 2; + let newSize = Math.max(this.buf.length * 2, 16); while (newSize - this.pos < needed) newSize *= 2; const next = Buffer.allocUnsafe(newSize); this.buf.copy(next, 0, 0, this.pos); diff --git a/src/index.ts b/src/index.ts index bb22992..6eb6d2c 100644 --- a/src/index.ts +++ b/src/index.ts @@ -20,4 +20,5 @@ export { ApiKeyNotFoundError, ChannelFullError, ProtocolError, + ProtocolError as RPCError, } from "./errors"; diff --git a/test/auth.test.ts b/test/auth.test.ts index 07f3fe1..d555d96 100644 --- a/test/auth.test.ts +++ b/test/auth.test.ts @@ -45,12 +45,20 @@ describe.skipIf(!FILA_SERVER_AVAILABLE)("TLS + API key auth", () => { it("connect fails without API key (unauthenticated)", async () => { const client = new Client(server.addr); // The handshake should be rejected without a valid API key. 
- await expect(client.connect()).rejects.toThrow(); + try { + await expect(client.connect()).rejects.toThrow(); + } finally { + await client.close(); + } }); it("connect fails with wrong API key", async () => { const client = new Client(server.addr, { apiKey: "wrong-key" }); - await expect(client.connect()).rejects.toThrow(); + try { + await expect(client.connect()).rejects.toThrow(); + } finally { + await client.close(); + } }); it("consume works with valid API key", async () => { diff --git a/test/batch.test.ts b/test/batch.test.ts index 22751de..47e370f 100644 --- a/test/batch.test.ts +++ b/test/batch.test.ts @@ -95,6 +95,11 @@ describe.skipIf(!FILA_SERVER_AVAILABLE)("Enqueue operations", () => { expect(results[1].success).toBe(true); if (results[0].success && results[1].success) { expect(results[0].messageId).not.toBe(results[1].messageId); + // Verify ordering: first result corresponds to first input, second to second. + expect(typeof results[0].messageId).toBe("string"); + expect(typeof results[1].messageId).toBe("string"); + // IDs are server-assigned in order; the first enqueue should get a smaller ID. + expect(results[0].messageId < results[1].messageId).toBe(true); } } finally { await client.close(); diff --git a/test/batcher.unit.test.ts b/test/batcher.unit.test.ts index d5013f3..a85328c 100644 --- a/test/batcher.unit.test.ts +++ b/test/batcher.unit.test.ts @@ -2,16 +2,16 @@ import { describe, it, expect } from "vitest"; import { Client } from "../src"; describe("Batcher unit tests (no server)", () => { - it("default batch mode is auto", () => { + it("default batch mode is auto", async () => { // Creating a client with default options should not throw. const client = new Client("localhost:9999"); // close() should succeed even without a real server (just closes channel). 
- client.close(); + await client.close(); }); - it("disabled batch mode creates no batcher", () => { + it("disabled batch mode creates no batcher", async () => { const client = new Client("localhost:9999", { batchMode: "disabled" }); - client.close(); + await client.close(); }); it("auto batch mode with custom maxBatchSize", () => { diff --git a/test/helpers.ts b/test/helpers.ts index ae4e9bb..4f1ee7d 100644 --- a/test/helpers.ts +++ b/test/helpers.ts @@ -89,22 +89,23 @@ export async function startTestServer( let lastErr: unknown; while (Date.now() < deadline && !exited) { + const probe = new Client(addr, { + apiKey: opts?.adminApiKey, + tls: !!opts?.adminTls, + caCert: opts?.adminTls?.caCert, + clientCert: opts?.adminTls?.clientCert, + clientKey: opts?.adminTls?.clientKey, + }); try { - const probe = new Client(addr, { - apiKey: opts?.adminApiKey, - tls: !!opts?.adminTls, - caCert: opts?.adminTls?.caCert, - clientCert: opts?.adminTls?.clientCert, - clientKey: opts?.adminTls?.clientKey, - }); await probe.connect(); await probe.listQueues(); - await probe.close(); ready = true; break; } catch (err) { lastErr = err; await sleep(500); + } finally { + await probe.close(); } } @@ -124,7 +125,13 @@ export async function startTestServer( clientCert: opts?.adminTls?.clientCert, clientKey: opts?.adminTls?.clientKey, }); - await adminClient.connect(); + try { + await adminClient.connect(); + } catch (err) { + proc.kill(); + fs.rmSync(dataDir, { recursive: true, force: true }); + throw err; + } return { addr,