Merged
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -10,7 +10,7 @@ description = "ValidMind Library"
license = "Commercial License"
name = "validmind"
readme = "README.pypi.md"
version = "2.7.7"
version = "2.8.0"

[tool.poetry.dependencies]
aiohttp = {extras = ["speedups"], version = "*"}
2 changes: 1 addition & 1 deletion validmind/__version__.py
@@ -1 +1 @@
__version__ = "2.7.7"
__version__ = "2.8.0"
88 changes: 15 additions & 73 deletions validmind/ai/test_descriptions.py
@@ -4,69 +4,22 @@

import json
import os
import re
from concurrent.futures import ThreadPoolExecutor
from typing import List, Optional, Union

from jinja2 import Template

from ..client_config import client_config
from ..logging import get_logger
from ..utils import NumpyEncoder, md_to_html, test_id_to_name
from ..vm_models.figure import Figure
from ..vm_models.result import ResultTable
from .utils import DescriptionFuture, get_client_and_model
from .utils import DescriptionFuture

__executor = ThreadPoolExecutor()
__prompt = None

logger = get_logger(__name__)


def _load_prompt():
global __prompt

if not __prompt:
folder_path = os.path.join(os.path.dirname(__file__), "test_result_description")
with open(os.path.join(folder_path, "system.jinja"), "r") as f:
system_prompt = f.read()
with open(os.path.join(folder_path, "user.jinja"), "r") as f:
user_prompt = f.read()

__prompt = (Template(system_prompt), Template(user_prompt))

return __prompt


def prompt_to_message(role, prompt):
if "[[IMAGE:" not in prompt:
return {"role": role, "content": prompt}

content = []

# Regex pattern to find [[IMAGE:<b64-data>]] markers
pattern = re.compile(r"\[\[IMAGE:(.*?)\]\]", re.DOTALL)

last_index = 0
for match in pattern.finditer(prompt):
# Text before the image marker
start, end = match.span()
if start > last_index:
content.append({"type": "text", "text": prompt[last_index:start]})

content.append({"type": "image_url", "image_url": {"url": match.group(1)}})

last_index = end

# Text after the last image
if last_index < len(prompt):
content.append({"type": "text", "text": prompt[last_index:]})

return {"role": role, "content": content}


def _get_llm_global_context():

# Get the context from the environment variable
context = os.getenv("VALIDMIND_LLM_DESCRIPTIONS_CONTEXT", "")

@@ -91,13 +44,13 @@ def generate_description(
title: Optional[str] = None,
):
"""Generate the description for the test results"""
from validmind.api_client import generate_test_result_description

if not tables and not figures and not metric:
raise ValueError(
"No tables, unit metric or figures provided - cannot generate description"
)

client, model = get_client_and_model()

# get last part of test id
test_name = title or test_id.split(".")[-1]

@@ -121,29 +74,18 @@
else:
summary = None

context = _get_llm_global_context()

input_data = {
"test_name": test_name,
"test_description": test_description,
"title": title,
"summary": summary,
"figures": [figure._get_b64_url() for figure in ([] if tables else figures)],
"context": context,
}
system, user = _load_prompt()

messages = [
prompt_to_message("system", system.render(input_data)),
prompt_to_message("user", user.render(input_data)),
]
response = client.chat.completions.create(
model=model,
temperature=0.0,
messages=messages,
)

return response.choices[0].message.content
return generate_test_result_description(
{
"test_name": test_name,
"test_description": test_description,
"title": title,
"summary": summary,
"figures": [
figure._get_b64_url() for figure in ([] if tables else figures)
],
"context": _get_llm_global_context(),
}
)["content"]


def background_generate_description(
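Note on the refactor above: generate_description no longer renders Jinja prompts and calls an LLM client in-process; it builds a payload and delegates to the ValidMind API via generate_test_result_description, then reads the generated text from the "content" key. A minimal sketch of that call, assuming an initialized ValidMind connection; the payload keys and return shape are taken from the diff, while the concrete values are hypothetical placeholders:

```python
# Sketch only: mirrors the call introduced in this PR.
# Keys and the ["content"] access come from the diff; values are placeholders.
from validmind.api_client import generate_test_result_description

response = generate_test_result_description(
    {
        "test_name": "ClassifierPerformance",  # last segment of the test ID
        "test_description": "Evaluates common classification metrics.",
        "title": None,
        "summary": None,  # serialized result tables, when the test produced any
        "figures": [],    # base64 figure URLs; only sent when there are no tables
        "context": "",    # contents of VALIDMIND_LLM_DESCRIPTIONS_CONTEXT, if set
    }
)
description_text = response["content"]
```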
29 changes: 0 additions & 29 deletions validmind/ai/test_result_description/config.yaml

This file was deleted.

73 changes: 0 additions & 73 deletions validmind/ai/test_result_description/context.py

This file was deleted.

124 changes: 0 additions & 124 deletions validmind/ai/test_result_description/image_processing.py

This file was deleted.
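For readers skimming the deletions: the removed prompt_to_message helper (along with the deleted test_result_description support files above) handled local multimodal prompt assembly, splitting [[IMAGE:<b64-data>]] markers out of a rendered prompt into OpenAI-style message parts. An illustration of that behavior, runnable only against validmind versions before 2.8.0 where the helper still exists; the prompt string is a made-up example:

```python
# Behavior of the removed helper (pre-2.8.0); the prompt below is hypothetical.
from validmind.ai.test_descriptions import prompt_to_message

prompt = "Describe this figure: [[IMAGE:data:image/png;base64,iVBORw0KGgo=]] Thanks."
message = prompt_to_message("user", prompt)
# message == {
#     "role": "user",
#     "content": [
#         {"type": "text", "text": "Describe this figure: "},
#         {"type": "image_url",
#          "image_url": {"url": "data:image/png;base64,iVBORw0KGgo="}},
#         {"type": "text", "text": " Thanks."},
#     ],
# }
```

With this PR, that assembly moves behind the API call shown earlier, so the prompt templates, image-marker parsing, and the local LLM client call are no longer needed in the library.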
