279 changes: 64 additions & 215 deletions bun.lock

Large diffs are not rendered by default.

37 changes: 37 additions & 0 deletions object-storage/Dockerfile
@@ -0,0 +1,37 @@
# Build stage
FROM oven/bun:1 AS builder

WORKDIR /app

# Copy package files
COPY package.json bun.lock* ./

# Install all dependencies (including devDependencies needed for build)
RUN bun install --frozen-lockfile

# Copy source code
COPY . .

# Build the application
RUN bun run build

# Runtime stage
FROM oven/bun:1-slim

WORKDIR /app

# Copy package.json for the start script
COPY package.json ./

# Copy only the dist folder from builder
COPY --from=builder /app/dist ./dist

# Set production environment
ENV NODE_ENV=production

# Expose the port (adjust if needed)
EXPOSE 3000

# Start the server
CMD ["bun", "run", "start"]

21 changes: 11 additions & 10 deletions object-storage/package.json
@@ -5,17 +5,18 @@
"private": true,
"type": "module",
"scripts": {
"dev": "deco dev --vite",
"configure": "deco configure",
"gen": "deco gen --output=shared/deco.gen.ts",
"deploy": "npm run build && deco deploy ./dist/server",
"check": "tsc --noEmit",
"build": "bun --bun vite build"
"gen": "deco gen --output=shared/deco.gen.ts",
"dev": "concurrently \"bun run dev:client\" \"NODE_ENV=development bun --hot run server/main.ts\"",
"dev:tunnel": "deco link -p 3000 -- bun run dev",
"dev:client": "bun --bun vite dev",
"build:client": "bun --bun vite build",
"build:server": "NODE_ENV=production bun build server/main.ts --target=bun --outfile=dist/server/main.js",
"build": "bun run build:client && bun run build:server",
"start": "NODE_ENV=production bun run dist/server/main.js"
Comment on lines +9 to +16
⚠️ Potential issue | 🟠 Major

🧩 Analysis chain

🏁 Script executed:

#!/bin/bash
# Verify build artifacts match Dockerfile expectations

echo "=== Checking Dockerfile expectations ==="
if [ -f "object-storage/Dockerfile" ]; then
  rg -n "dist/(client|server)" object-storage/Dockerfile
else
  echo "Dockerfile not found at object-storage/Dockerfile"
fi

echo -e "\n=== Checking build script outputs ==="
rg -n "outfile=|outdir=" object-storage/package.json

echo -e "\n=== Checking vite build output config ==="
if [ -f "object-storage/vite.config.ts" ]; then
  rg -n "build\.(outDir|rollupOptions)" object-storage/vite.config.ts
else
  echo "vite.config.ts not found at object-storage/vite.config.ts"
fi

Repository: decocms/mcps

Length of output: 292


🏁 Script executed:

#!/bin/bash
# Find Dockerfile and check full vite config

echo "=== Listing Dockerfile possibilities ==="
find object-storage -name "Dockerfile*" -type f 2>/dev/null

echo -e "\n=== Full vite.config.ts content ==="
if [ -f "object-storage/vite.config.ts" ]; then
  cat object-storage/vite.config.ts
else
  echo "vite.config.ts not found"
fi

echo -e "\n=== Check for root Dockerfile ==="
if [ -f "Dockerfile" ]; then
  echo "Root Dockerfile exists"
  head -50 Dockerfile
fi

Repository: decocms/mcps

Length of output: 600


🏁 Script executed:

#!/bin/bash
cat object-storage/Dockerfile

Repository: decocms/mcps

Length of output: 689


🏁 Script executed:

#!/bin/bash
# Check for any build output references or issues

echo "=== Checking package.json build commands ==="
rg -A 2 '"build"' object-storage/package.json

echo -e "\n=== Checking if there are any .gitignore entries for dist ==="
rg -n "^dist" object-storage/.gitignore 2>/dev/null || echo "No .gitignore or dist entry not found"

echo -e "\n=== Checking build:client vite command specifics ==="
rg -B 2 -A 2 "dev:client|build:client" object-storage/package.json

Repository: decocms/mcps

Length of output: 946


Verify the vite build output configuration and document the expected build structure.

The build scripts orchestrate separate client and server builds, but there is a configuration gap: vite.config.ts does not explicitly set build.outDir, so bun run build:client writes to dist/ by default rather than dist/client. The result is a mixed layout in which client assets land at the root of dist/ alongside the server bundle at dist/server/main.js.

To match the apparent intent (separate client and server builds):

  • Either add build: { outDir: 'dist/client' } to vite.config.ts
  • Or update documentation to clarify that both outputs are intentionally merged in dist/

The Dockerfile copies all of dist/ without issues, but the build structure should be explicitly defined to avoid confusion and prevent accidental asset collisions.
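
A minimal sketch of the first option is below. Everything except the build block is copied from this PR's vite.config.ts; the explicit outDir is the suggested addition and may need adjusting if @decocms/vite-plugin already controls output paths. Note that server/main.ts in this PR already points its asset server at ./dist/client, which lines up with this layout.

// vite.config.ts (sketch): send client assets to dist/client so they cannot
// collide with the server bundle at dist/server/main.js
import { defineConfig } from "vite";
import react from "@vitejs/plugin-react";
import tailwindcss from "@tailwindcss/vite";
import deco from "@decocms/vite-plugin";
import path from "path";

export default defineConfig({
  plugins: [react(), tailwindcss(), deco({ target: "bun" })],
  resolve: {
    alias: {
      "@": path.resolve(__dirname, "./view/src"),
    },
  },
  build: {
    outDir: "dist/client", // suggested addition; verify against the plugin's defaults
    emptyOutDir: true,
  },
});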

🤖 Prompt for AI Agents
In object-storage/package.json around lines 9 to 16, the client build currently
uses Vite's default output (dist/) which will collide with the server bundle at
dist/server/main.js; either set an explicit outDir in vite.config.ts (e.g.,
build.outDir = "dist/client") so client assets go to dist/client, or update
README/docs to state builds intentionally share dist/ and ensure Dockerfile and
any static-serving paths expect that layout; pick one approach and make the
corresponding change (edit vite.config.ts to add build.outDir or update
documentation and any downstream copy paths) so the build output structure is
explicit and deterministic.

},
"dependencies": {
"@aws-sdk/client-s3": "^3.716.0",
"@aws-sdk/s3-request-presigner": "^3.716.0",
"@decocms/runtime": "0.25.1",
"@decocms/runtime": "1.0.0-alpha.2",
"@radix-ui/react-collapsible": "^1.1.12",
"@radix-ui/react-popover": "^1.1.15",
"@radix-ui/react-slot": "^1.2.3",
@@ -25,6 +26,7 @@
"@tanstack/react-router-devtools": "^1.121.2",
"class-variance-authority": "^0.7.1",
"clsx": "^2.1.1",
"hono": "^4.10.7",
"lucide-react": "^0.476.0",
"next-themes": "^0.4.6",
"react": "^19.0.0",
@@ -36,8 +38,7 @@
"zod": "^3.24.3"
},
"devDependencies": {
"@cloudflare/vite-plugin": "^1.13.4",
"@cloudflare/workers-types": "^4.20251014.0",
"@decocms/vite-plugin": "1.0.0-alpha.1",
"@decocms/mcps-shared": "1.0.0",
"@mastra/core": "^0.24.0",
"@modelcontextprotocol/sdk": "^1.21.0",
15 changes: 6 additions & 9 deletions object-storage/server/lib/s3-client.ts
@@ -2,7 +2,7 @@
* S3 Client factory for creating S3 clients from state configuration.
* Supports any S3-compatible storage provider including AWS S3, R2, MinIO, etc.
*/
import { S3Client } from "@aws-sdk/client-s3";
import { S3Client } from "bun";
import type { Env } from "../main.ts";

/**
@@ -13,21 +13,18 @@ import type { Env } from "../main.ts";
* @returns Configured S3Client instance
*/
export function createS3Client(env: Env): S3Client {
const state = env.DECO_CHAT_REQUEST_CONTEXT.state;
const state = env.DECO_REQUEST_CONTEXT.state;

const config: ConstructorParameters<typeof S3Client>[0] = {
region: state.region,
credentials: {
accessKeyId: state.accessKeyId,
secretAccessKey: state.secretAccessKey,
},
accessKeyId: state.accessKeyId,
secretAccessKey: state.secretAccessKey,
bucket: state.bucketName,
};

// Add custom endpoint if provided (for S3-compatible storage like R2, MinIO, etc.)
if (state.endpoint) {
config.endpoint = state.endpoint;
// For S3-compatible services, we often need to force path style
config.forcePathStyle = true;
}
Comment on lines 18 to 28
⚠️ Potential issue | 🔴 Critical

🧩 Analysis chain

🌐 Web query:

Bun S3Client path-style URLs custom endpoint configuration

💡 Result:

Short answer: use S3Client's endpoint option together with virtualHostedStyle. Set endpoint to your custom URL and set virtualHostedStyle: false to force path-style URLs (or omit it if Bun already defaults to path-style for custom endpoints), e.g.:

import { S3Client } from "bun";

const s3 = new S3Client({
  accessKeyId: "AKIA...",
  secretAccessKey: "...",
  bucket: "my-bucket",
  endpoint: "http://localhost:9000", // or your custom S3-compatible URL
  virtualHostedStyle: false,         // force path-style URLs
});

Bun docs show using endpoint for custom/backends and the virtualHostedStyle flag to choose virtual-hosted vs path-style behavior. [1][2]

Note: AWS recommends virtual-hosted style and has communicated deprecation plans for path-style access, so prefer virtualHostedStyle: true for AWS S3 (or when interoperable). [3]

Sources:

  • Bun S3 docs / S3Client (endpoint, virtualHostedStyle examples). [1]
  • Bun API reference for S3Client. [2]
  • AWS documentation on path-style vs virtual-hosted–style and deprecation guidance. [3]

🏁 Script executed:

# Check for any previous implementations or history of forcePathStyle in the repo
git log --all --source --remotes -S "forcePathStyle" -- "object-storage/" 2>/dev/null | head -20

Repository: decocms/mcps

Length of output: 273


🏁 Script executed:

# Search for any test files or documentation related to S3 configuration
fd -e test.ts -e spec.ts -e test.js -e docs.md -o -e "\.md" . object-storage/ 2>/dev/null | head -20

Repository: decocms/mcps

Length of output: 38


🏁 Script executed:

# Check the full s3-client.ts file for context
wc -l object-storage/server/lib/s3-client.ts

Repository: decocms/mcps

Length of output: 97


🏁 Script executed:

cat -n object-storage/server/lib/s3-client.ts

Repository: decocms/mcps

Length of output: 1934


🏁 Script executed:

# Search for virtualHostedStyle usage in the codebase
rg "virtualHostedStyle" object-storage/

Repository: decocms/mcps

Length of output: 38


🏁 Script executed:

# Check if there are any S3-compatible provider tests or documentation
find object-storage/ -type f \( -name "*.md" -o -name "*.test.ts" -o -name "*.spec.ts" \) | xargs cat 2>/dev/null | head -100

Repository: decocms/mcps

Length of output: 2914


Add virtualHostedStyle: false when using S3-compatible endpoints.

Bun's S3Client does not automatically handle path-style URLs for custom endpoints—it requires explicit configuration via the virtualHostedStyle option. S3-compatible providers like MinIO and R2 often require path-style URLs, which means the current code will fail or behave unexpectedly with these providers.

Update the configuration to set virtualHostedStyle: false when a custom endpoint is provided:

if (state.endpoint) {
  config.endpoint = state.endpoint;
  config.virtualHostedStyle = false;
}
🤖 Prompt for AI Agents
In object-storage/server/lib/s3-client.ts around lines 18 to 28, the S3Client
config does not disable virtualHostedStyle for custom endpoints, which causes
S3-compatible providers (MinIO, R2) to require path-style URLs and fail; update
the config when state.endpoint is present to set config.endpoint =
state.endpoint and also set config.virtualHostedStyle = false so the client uses
path-style addressing for custom S3-compatible endpoints.


return new S3Client(config);
@@ -48,6 +45,6 @@ export function getPresignedUrlExpiration(
return overrideExpiration;
}

const state = env.DECO_CHAT_REQUEST_CONTEXT.state;
const state = env.DECO_REQUEST_CONTEXT.state;
return state.defaultPresignedUrlExpiration ?? 3600; // Default to 1 hour
}
13 changes: 6 additions & 7 deletions object-storage/server/main.ts
@@ -5,6 +5,7 @@
* application at /.
*/
import { DefaultEnv, withRuntime } from "@decocms/runtime";
import { createAssetServerFetcher } from "@decocms/runtime/bun-asset-server";
import {
type Env as DecoEnv,
StateSchema as BaseStateSchema,
@@ -54,12 +55,7 @@ export const StateSchema = BaseStateSchema.extend({
* It includes all of the generated types from your
* Deco bindings, along with the default ones.
*/
export type Env = DefaultEnv &
DecoEnv & {
ASSETS: {
fetch: (request: Request, init?: RequestInit) => Promise<Response>;
};
};
export type Env = DefaultEnv & DecoEnv;

const runtime = withRuntime<Env, typeof StateSchema>({
oauth: {
@@ -97,7 +93,10 @@
* If you wanted to add custom api routes that dont make sense to be a tool,
* you can add them on this handler.
*/
fetch: (req, env) => env.ASSETS.fetch(req),
fetch: createAssetServerFetcher({
env: process.env.NODE_ENV as "development" | "production" | "test",
assetsDirectory: "./dist/client",
}),
Comment on lines +96 to +99
⚠️ Potential issue | 🟡 Minor

🧩 Analysis chain

🏁 Script executed:

#!/bin/bash
# Check NODE_ENV usage in package.json scripts, Dockerfile, and fallbacks

echo "=== Checking package.json scripts ==="
if [ -f "object-storage/package.json" ]; then
  rg -n "NODE_ENV" object-storage/package.json
else
  echo "package.json not found at object-storage/package.json"
fi

echo -e "\n=== Checking Dockerfile ==="
if [ -f "object-storage/Dockerfile" ]; then
  rg -n "NODE_ENV" object-storage/Dockerfile
else
  echo "No Dockerfile found at object-storage/Dockerfile, searching for any Dockerfile..."
  find object-storage -name "Dockerfile*" -type f 2>/dev/null
fi

echo -e "\n=== Checking if there's a fallback for undefined NODE_ENV ==="
rg -n "NODE_ENV.*\|\|" object-storage/ 2>/dev/null || echo "No fallback patterns found"

echo -e "\n=== Checking main.ts around the problematic lines ==="
sed -n '90,110p' object-storage/server/main.ts 2>/dev/null || echo "File not accessible"

Repository: decocms/mcps

Length of output: 985


🏁 Script executed:

#!/bin/bash
# Find and examine createAssetServerFetcher implementation

echo "=== Searching for createAssetServerFetcher definition ==="
rg -n "createAssetServerFetcher" object-storage/ -A 5

echo -e "\n=== Checking the import location ==="
rg -n "import.*createAssetServerFetcher" object-storage/

echo -e "\n=== Searching across entire repo ==="
rg -n "function createAssetServerFetcher\|export.*createAssetServerFetcher" --type ts

Repository: decocms/mcps

Length of output: 1056


Add explicit fallback for NODE_ENV to improve type safety.

The code relies on process.env.NODE_ENV being set, and while it is properly set in all deployment scripts (dev, build, start, and Dockerfile), the type cast as "development" | "production" | "test" provides no runtime validation. If executed outside the documented deployment paths, undefined would be passed to createAssetServerFetcher.

Add an explicit fallback:

env: (process.env.NODE_ENV || "production") as "development" | "production" | "test",

This ensures type safety and handles edge cases where NODE_ENV might not be set.
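
In context, the handler in server/main.ts would look roughly like this (a sketch; only the fallback is new, the rest matches the PR):

// server/main.ts (sketch): fall back to "production" when NODE_ENV is unset
fetch: createAssetServerFetcher({
  env: (process.env.NODE_ENV || "production") as
    | "development"
    | "production"
    | "test",
  assetsDirectory: "./dist/client",
}),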

🤖 Prompt for AI Agents
In object-storage/server/main.ts around lines 96 to 99, the code casts
process.env.NODE_ENV to the union type without a runtime fallback which may pass
undefined to createAssetServerFetcher; update the env value to provide a default
(e.g. "production") when NODE_ENV is falsy, then cast to the union type so
createAssetServerFetcher always receives a valid "development" | "production" |
"test" string at runtime.

});

export default runtime;
119 changes: 48 additions & 71 deletions object-storage/server/tools/storage.ts
@@ -7,15 +7,6 @@
* - Generating presigned URLs for GET and PUT operations
* - Deleting objects (single and batch)
*/
import {
DeleteObjectCommand,
DeleteObjectsCommand,
GetObjectCommand,
HeadObjectCommand,
ListObjectsV2Command,
PutObjectCommand,
} from "@aws-sdk/client-s3";
import { getSignedUrl } from "@aws-sdk/s3-request-presigner";
import { createPrivateTool } from "@decocms/runtime/mastra";
import { z } from "zod";
import type { Env } from "../main.ts";
@@ -66,26 +57,28 @@ export const createListObjectsTool = (env: Env) =>
execute: async (ctx: any) => {
const { prefix, maxKeys, continuationToken } = ctx;
const s3Client = createS3Client(env);
const state = env.DECO_CHAT_REQUEST_CONTEXT.state;

const command = new ListObjectsV2Command({
Bucket: state.bucketName,
Prefix: prefix,
MaxKeys: maxKeys,
ContinuationToken: continuationToken,
const response = await s3Client.list({
prefix,
maxKeys,
startAfter: continuationToken,
});

const response = await s3Client.send(command);

return {
objects: (response.Contents || []).map((obj) => ({
key: obj.Key!,
size: obj.Size!,
lastModified: obj.LastModified!.toISOString(),
etag: obj.ETag!,
objects: (response.contents || []).map((obj) => ({
key: obj.key,
size: obj.size ?? 0,
lastModified: obj.lastModified
? typeof obj.lastModified === "object"
? (obj.lastModified as Date).toISOString()
: String(obj.lastModified)
: "",
etag: obj.eTag ?? "",
})),
nextContinuationToken: response.NextContinuationToken,
isTruncated: response.IsTruncated ?? false,
nextContinuationToken: response.isTruncated
? response.contents?.at(-1)?.key
: undefined,
isTruncated: response.isTruncated ?? false,
};
},
});
@@ -114,21 +107,15 @@ export const createGetObjectMetadataTool = (env: Env) =>
execute: async (ctx: any) => {
const { key } = ctx;
const s3Client = createS3Client(env);
const state = env.DECO_CHAT_REQUEST_CONTEXT.state;

const command = new HeadObjectCommand({
Bucket: state.bucketName,
Key: key,
});

const response = await s3Client.send(command);
const stat = await s3Client.file(key).stat();

return {
contentType: response.ContentType,
contentLength: response.ContentLength!,
lastModified: response.LastModified!.toISOString(),
etag: response.ETag!,
metadata: response.Metadata,
contentType: stat.type,
contentLength: stat.size,
lastModified: stat.lastModified.toISOString(),
etag: stat.etag,
metadata: undefined, // Bun's stat doesn't include custom metadata
};
},
});
@@ -159,15 +146,10 @@ export const createGetPresignedUrlTool = (env: Env) =>
execute: async (ctx: any) => {
const { key, expiresIn } = ctx;
const s3Client = createS3Client(env);
const state = env.DECO_CHAT_REQUEST_CONTEXT.state;
const expirationSeconds = getPresignedUrlExpiration(env, expiresIn);

const command = new GetObjectCommand({
Bucket: state.bucketName,
Key: key,
});

const url = await getSignedUrl(s3Client, command, {
const url = s3Client.file(key).presign({
method: "GET",
expiresIn: expirationSeconds,
});

@@ -208,17 +190,12 @@ export const createPutPresignedUrlTool = (env: Env) =>
execute: async (ctx: any) => {
const { key, expiresIn, contentType } = ctx;
const s3Client = createS3Client(env);
const state = env.DECO_CHAT_REQUEST_CONTEXT.state;
const expirationSeconds = getPresignedUrlExpiration(env, expiresIn);

const command = new PutObjectCommand({
Bucket: state.bucketName,
Key: key,
ContentType: contentType,
});

const url = await getSignedUrl(s3Client, command, {
const url = s3Client.file(key).presign({
method: "PUT",
expiresIn: expirationSeconds,
type: contentType,
});

return {
Expand All @@ -245,14 +222,8 @@ export const createDeleteObjectTool = (env: Env) =>
execute: async (ctx: any) => {
const { key } = ctx;
const s3Client = createS3Client(env);
const state = env.DECO_CHAT_REQUEST_CONTEXT.state;

const command = new DeleteObjectCommand({
Bucket: state.bucketName,
Key: key,
});

await s3Client.send(command);
await s3Client.file(key).delete();
⚠️ Potential issue | 🔴 Critical

🧩 Analysis chain

🏁 Script executed:

wc -l object-storage/server/tools/storage.ts

Repository: decocms/mcps

Length of output: 98


🏁 Script executed:

sed -n '210,240p' object-storage/server/tools/storage.ts

Repository: decocms/mcps

Length of output: 872


🏁 Script executed:

rg -n "createDeleteObjectTool" object-storage/server/tools/storage.ts -A 20

Repository: decocms/mcps

Length of output: 847


🏁 Script executed:

rg -n "\.delete\(\)" object-storage/server/tools/storage.ts -B 5 -A 5

Repository: decocms/mcps

Length of output: 751


🏁 Script executed:

rg -n "createDeleteObjectTool\|DELETE_OBJECT" object-storage/server/tools/storage.ts -B 2 -A 2

Repository: decocms/mcps

Length of output: 38


🏁 Script executed:

rg -n "DELETE_OBJECT" --type=ts -B 3 -A 3 | head -50

Repository: decocms/mcps

Length of output: 2708


🏁 Script executed:

grep -r "\.delete\(\)" object-storage --type=ts -B 3 -A 3

Repository: decocms/mcps

Length of output: 176


🏁 Script executed:

sed -n '250,280p' object-storage/server/tools/storage.ts

Repository: decocms/mcps

Length of output: 1045


🏁 Script executed:

rg -n "Promise.allSettled" object-storage --type=ts -B 5 -A 10

Repository: decocms/mcps

Length of output: 1359


🏁 Script executed:

rg -n "execute.*async" object-storage/server/tools/storage.ts -A 15

Repository: decocms/mcps

Length of output: 3027


Add error handling to match the batch delete pattern.

The single delete operation lacks error handling, while the batch delete implementation (line 267–277) correctly uses Promise.allSettled() to handle failures. When s3Client.file(key).delete() fails (404, 403, network errors), it throws an unhandled exception. Update the output schema to include an optional error field and wrap the delete in error handling to gracefully report failures to callers, consistent with the DELETE_OBJECTS tool.
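
A sketch of what the suggested handler could look like, reusing the PR's createS3Client helper and mirroring the batch tool's error reporting; the exact output shape depends on how the tool's outputSchema is extended:

// DELETE_OBJECT execute (sketch): report failures instead of throwing
execute: async (ctx: any) => {
  const { key } = ctx;
  const s3Client = createS3Client(env);

  try {
    await s3Client.file(key).delete();
    return { success: true, key };
  } catch (error) {
    // Surface 404/403/network failures to the caller, matching DELETE_OBJECTS
    return {
      success: false,
      key,
      error: error instanceof Error ? error.message : String(error),
    };
  }
},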

🤖 Prompt for AI Agents
In object-storage/server/tools/storage.ts around line 226, the single-object
delete call awaits s3Client.file(key).delete() without error handling and must
mirror the batch delete pattern: update the function's output schema/type to
include an optional error?: string field, wrap the delete in a try/catch, and on
success return the same result shape as batch (e.g., { key, status: 'fulfilled'
}), while on failure catch the error and return { key, status: 'rejected',
error: error.message || String(error) } so callers receive graceful, consistent
failure details.


return {
success: true,
Expand Down Expand Up @@ -291,23 +262,29 @@ export const createDeleteObjectsTool = (env: Env) =>
execute: async (ctx: any) => {
const { keys } = ctx;
const s3Client = createS3Client(env);
const state = env.DECO_CHAT_REQUEST_CONTEXT.state;

const command = new DeleteObjectsCommand({
Bucket: state.bucketName,
Delete: {
Objects: keys.map((key: string) => ({ Key: key })),
},
});
// Bun doesn't have batch delete, so we use Promise.allSettled for parallel deletes
const results = await Promise.allSettled(
keys.map((key: string) => s3Client.file(key).delete()),
);

const response = await s3Client.send(command);
const deleted: string[] = [];
const errors: Array<{ key: string; message: string }> = [];

results.forEach((result, index) => {
if (result.status === "fulfilled") {
deleted.push(keys[index]);
} else {
errors.push({
key: keys[index],
message: result.reason?.message || "Unknown error",
});
}
});

return {
deleted: (response.Deleted || []).map((obj) => obj.Key!),
errors: (response.Errors || []).map((err) => ({
key: err.Key!,
message: err.Message || "Unknown error",
})),
deleted,
errors,
};
},
});
2 changes: 1 addition & 1 deletion object-storage/tsconfig.json
@@ -33,7 +33,7 @@
},

/* Types */
"types": ["vite/client", "@cloudflare/workers-types"]
"types": ["vite/client"]
},
"include": [
"view",
30 changes: 2 additions & 28 deletions object-storage/vite.config.ts
@@ -1,42 +1,16 @@
import { defineConfig } from "vite";
import react from "@vitejs/plugin-react";
import tailwindcss from "@tailwindcss/vite";
import { cloudflare } from "@cloudflare/vite-plugin";
import deco from "@decocms/mcps-shared/vite-plugin";
import deco from "@decocms/vite-plugin";

import path from "path";

const VITE_SERVER_ENVIRONMENT_NAME = "server";

// https://vitejs.dev/config/
export default defineConfig({
plugins: [
react(),
cloudflare({
configPath: "wrangler.toml",
viteEnvironment: {
name: VITE_SERVER_ENVIRONMENT_NAME,
},
}),
tailwindcss(),
deco(),
],
plugins: [react(), tailwindcss(), deco({ target: "bun" })],
resolve: {
alias: {
"@": path.resolve(__dirname, "./view/src"),
},
},

define: {
// Ensure proper module definitions for Cloudflare Workers context
"process.env.NODE_ENV": JSON.stringify(
process.env.NODE_ENV || "development",
),
global: "globalThis",
// '__filename': '""',
// '__dirname': '""',
},

// Clear cache more aggressively
cacheDir: "node_modules/.vite",
});