diff --git a/app/assets/app.css b/app/assets/app.css
index b9c4069..93a3d1c 100644
--- a/app/assets/app.css
+++ b/app/assets/app.css
@@ -12,3 +12,10 @@
column-gap: 20px;
font-size: 16px;
}
+
+#searches-nav {
+ display: flex;
+ justify-content: center;
+ align-items: center;
+ column-gap: 20px;
+}
diff --git a/app/graphql/contacts/find.graphql b/app/graphql/contacts/find.graphql
new file mode 100644
index 0000000..20a021d
--- /dev/null
+++ b/app/graphql/contacts/find.graphql
@@ -0,0 +1,10 @@
+query contacts_find($id: ID!) {
+ records(per_page: 1, filter: { id: { value: $id } }) {
+ results {
+ id
+ created_at
+ body: property(name: "body")
+ email: property(name: "email")
+ }
+ }
+}
diff --git a/app/lib/consumers/contact_created/create_embedding.liquid b/app/lib/consumers/contact_created/create_embedding.liquid
new file mode 100644
index 0000000..ae74d05
--- /dev/null
+++ b/app/lib/consumers/contact_created/create_embedding.liquid
@@ -0,0 +1,21 @@
+{% liquid
+ function contact = 'queries/contacts/find', id: event.id
+ log contact, type: 'contact'
+
+ function response = 'modules/openai/commands/openai/fetch_embeddings', object: contact.body
+ log response, type: 'response'
+
+ if response.error == blank
+ assign pos_embedding_input = '{}' | parse_json
+ assign metadata = '{}' | parse_json
+
+ hash_assign metadata['contact_id'] = contact.id
+ hash_assign pos_embedding_input['metadata'] = metadata
+ hash_assign pos_embedding_input['embedding'] = response.data.first.embedding
+ hash_assign pos_embedding_input['content'] = contact.body
+ function pos_embedding = 'modules/openai/commands/embeddings/create', object: pos_embedding_input
+ log pos_embedding, type: 'pos_embedding'
+ endif
+%}
+
+
diff --git a/app/lib/queries/contacts/find.liquid b/app/lib/queries/contacts/find.liquid
new file mode 100644
index 0000000..b82e81e
--- /dev/null
+++ b/app/lib/queries/contacts/find.liquid
@@ -0,0 +1,10 @@
+{% liquid
+  if id == null
+    log "contacts/find requires ID", type: "ERROR"
+    return null
+  endif
+
+  graphql res = 'contacts/find', id: id
+
+  return res.records.results.first
+%}
diff --git a/app/migrations/20251103131544_populate_embeddings_for_existing_contacts.liquid b/app/migrations/20251103131544_populate_embeddings_for_existing_contacts.liquid
new file mode 100644
index 0000000..5ad14b6
--- /dev/null
+++ b/app/migrations/20251103131544_populate_embeddings_for_existing_contacts.liquid
@@ -0,0 +1,7 @@
+
+ {% liquid
+ comment
+ populate_embeddings_for_existing_contacts migration code goes below
+ endcomment
+ %}
+
\ No newline at end of file
diff --git a/app/pos-modules.json b/app/pos-modules.json
index 4d870f2..b1a5335 100644
--- a/app/pos-modules.json
+++ b/app/pos-modules.json
@@ -2,6 +2,7 @@
"modules": {
"core": "2.0.7",
"common-styling": "1.32.0",
- "user": "5.1.1"
+ "user": "5.1.1",
+ "openai": "1.1.0"
}
}
\ No newline at end of file
diff --git a/app/pos-modules.lock.json b/app/pos-modules.lock.json
index 4d870f2..b1a5335 100644
--- a/app/pos-modules.lock.json
+++ b/app/pos-modules.lock.json
@@ -2,6 +2,7 @@
"modules": {
"core": "2.0.7",
"common-styling": "1.32.0",
- "user": "5.1.1"
+ "user": "5.1.1",
+ "openai": "1.1.0"
}
}
\ No newline at end of file
diff --git a/app/views/pages/admin/contacts/ai_search.liquid b/app/views/pages/admin/contacts/ai_search.liquid
new file mode 100644
index 0000000..faaaee2
--- /dev/null
+++ b/app/views/pages/admin/contacts/ai_search.liquid
@@ -0,0 +1,9 @@
+{% liquid
+ if context.params.query != blank
+ function response = 'modules/openai/commands/openai/fetch_embeddings', object: context.params.query
+
+ function related_embeddings = 'modules/openai/queries/embeddings/search', related_to: response.data.first.embedding, limit: 3, metadata: null
+ endif
+
+ render 'admin/contacts/ai_results', embeddings: related_embeddings, response: response
+%}
diff --git a/app/views/partials/admin/contacts/ai_results.liquid b/app/views/partials/admin/contacts/ai_results.liquid
new file mode 100644
index 0000000..2859995
--- /dev/null
+++ b/app/views/partials/admin/contacts/ai_results.liquid
@@ -0,0 +1,32 @@
+{% render 'admin/contacts/nav' %}
+
+
AI Search results
+
+
OpenAI Response:
+
+
+ {{ response }}
+
+
+
+
+
+
+
+ {% for embedding in embeddings.results %}
+
+ -
+ ID
+ {{ embedding.metadata.contact_id }}
+
+ -
+ Body
+ {{ embedding.content | truncate: 40 }}
+
+
+ {% endfor %}
+
+
diff --git a/app/views/partials/admin/contacts/nav.liquid b/app/views/partials/admin/contacts/nav.liquid
new file mode 100644
index 0000000..c88871f
--- /dev/null
+++ b/app/views/partials/admin/contacts/nav.liquid
@@ -0,0 +1,26 @@
+
diff --git a/app/views/partials/admin/index.liquid b/app/views/partials/admin/index.liquid
index 1164c42..1d7be79 100644
--- a/app/views/partials/admin/index.liquid
+++ b/app/views/partials/admin/index.liquid
@@ -1,14 +1,4 @@
-
-
-
+{% render 'admin/contacts/nav' %}
Contacts
diff --git a/modules/openai/public/api_calls/chat.liquid b/modules/openai/public/api_calls/chat.liquid
new file mode 100644
index 0000000..5097b18
--- /dev/null
+++ b/modules/openai/public/api_calls/chat.liquid
@@ -0,0 +1,11 @@
+---
+to: >
+ https://api.openai.com/{{ context.constants['modules/openai/OPENAI_VERSION'] | default: "v1" }}/chat/completions
+request_type: POST
+headers: >
+ {
+ "Authorization": "Bearer {{ context.constants['modules/openai/OPENAI_SECRET_TOKEN'] }}",
+ "Content-Type": "application/json"
+ }
+---
+{{ data }}
diff --git a/modules/openai/public/api_calls/responses.liquid b/modules/openai/public/api_calls/responses.liquid
new file mode 100644
index 0000000..e134b42
--- /dev/null
+++ b/modules/openai/public/api_calls/responses.liquid
@@ -0,0 +1,11 @@
+---
+to: >
+ https://api.openai.com/{{ context.constants['modules/openai/OPENAI_VERSION'] | default: "v1" }}/responses
+request_type: POST
+headers: >
+ {
+ "Authorization": "Bearer {{ context.constants['modules/openai/OPENAI_SECRET_TOKEN'] }}",
+ "Content-Type": "application/json"
+ }
+---
+{{ data }}
diff --git a/modules/openai/public/graphql/embeddings/create.graphql b/modules/openai/public/graphql/embeddings/create.graphql
new file mode 100644
index 0000000..9e4a101
--- /dev/null
+++ b/modules/openai/public/graphql/embeddings/create.graphql
@@ -0,0 +1,17 @@
+mutation embedding_create($embedding: [Float!]!, $content: String!, $metadata: HashObject, $token_count: Int){
+ embedding_create: embedding_create_rc(
+ embedding: {
+ embedding: $embedding
+ content: $content
+ metadata: $metadata
+ token_count: $token_count
+ }
+ )
+ {
+ id
+ embedding
+ content
+ metadata
+ token_count
+ }
+}
diff --git a/modules/openai/public/graphql/embeddings/delete.graphql b/modules/openai/public/graphql/embeddings/delete.graphql
new file mode 100644
index 0000000..7c1165d
--- /dev/null
+++ b/modules/openai/public/graphql/embeddings/delete.graphql
@@ -0,0 +1,7 @@
+mutation embedding_delete($id: ID!){
+ embedding_delete: embedding_delete_rc(id: $id){
+ id
+ content
+ metadata
+ }
+}
diff --git a/modules/openai/public/graphql/embeddings/search.graphql b/modules/openai/public/graphql/embeddings/search.graphql
new file mode 100644
index 0000000..74399c6
--- /dev/null
+++ b/modules/openai/public/graphql/embeddings/search.graphql
@@ -0,0 +1,27 @@
+query embeddings($limit: Int = 2000, $page: Int = 1, $metadata: [JsonbFilter!], $related_to: [Float!]) {
+ embeddings: embeddings_rc(
+ per_page: $limit,
+ page: $page
+ filter: { metadata: $metadata }
+ sort: {
+ embedding: {
+ order: euclidean
+ embedding: $related_to
+ }
+ }
+
+ )
+ {
+ total_entries
+ has_next_page
+ has_previous_page
+ total_pages
+ results {
+ id
+ metadata
+ embedding
+ content
+ token_count
+ }
+ }
+}
diff --git a/modules/openai/public/graphql/embeddings/update.graphql b/modules/openai/public/graphql/embeddings/update.graphql
new file mode 100644
index 0000000..6058722
--- /dev/null
+++ b/modules/openai/public/graphql/embeddings/update.graphql
@@ -0,0 +1,18 @@
+mutation embedding_update($id: ID!, $embedding: [Float!]!, $content: String!, $metadata: HashObject, $token_count: Int){
+ embedding_update: embedding_update_rc(
+ id: $id
+ embedding: {
+ embedding: $embedding
+ content: $content
+ metadata: $metadata
+ token_count: $token_count
+ }
+ )
+ {
+ id
+ embedding
+ content
+ metadata
+ token_count
+ }
+}
diff --git a/modules/openai/public/lib/commands/embeddings/create.liquid b/modules/openai/public/lib/commands/embeddings/create.liquid
new file mode 100644
index 0000000..d5da6f2
--- /dev/null
+++ b/modules/openai/public/lib/commands/embeddings/create.liquid
@@ -0,0 +1,11 @@
+{% liquid
+ function object = 'modules/openai/commands/embeddings/create/build', object: object
+ function object = 'modules/openai/commands/embeddings/create/check', object: object
+
+ if object.valid
+ function object = 'modules/openai/commands/embeddings/create/execute', object: object
+ endif
+
+ return object
+%}
+
diff --git a/modules/openai/public/lib/commands/embeddings/create/build.liquid b/modules/openai/public/lib/commands/embeddings/create/build.liquid
new file mode 100644
index 0000000..44f6bf5
--- /dev/null
+++ b/modules/openai/public/lib/commands/embeddings/create/build.liquid
@@ -0,0 +1,10 @@
+{% liquid
+ assign embedding = '{}' | parse_json
+
+ hash_assign embedding['embedding'] = object.embedding
+ hash_assign embedding['content'] = object.content
+ hash_assign embedding['metadata'] = object.metadata
+ hash_assign embedding['token_count'] = object.token_count
+
+ return embedding
+%}
diff --git a/modules/openai/public/lib/commands/embeddings/create/check.liquid b/modules/openai/public/lib/commands/embeddings/create/check.liquid
new file mode 100644
index 0000000..eb26138
--- /dev/null
+++ b/modules/openai/public/lib/commands/embeddings/create/check.liquid
@@ -0,0 +1,11 @@
+{% liquid
+ assign c = '{ "errors": {}, "valid": true }' | parse_json
+
+ function c = 'modules/core/validations/presence', c: c, object: object, field_name: 'embedding'
+ function c = 'modules/core/validations/length', c: c, object: object, field_name: 'embedding', is: 1536
+ function c = 'modules/core/validations/presence', c: c, object: object, field_name: 'content'
+
+ assign object = object | hash_merge: c
+
+ return object
+%}
diff --git a/modules/openai/public/lib/commands/embeddings/create/execute.liquid b/modules/openai/public/lib/commands/embeddings/create/execute.liquid
new file mode 100644
index 0000000..7674aec
--- /dev/null
+++ b/modules/openai/public/lib/commands/embeddings/create/execute.liquid
@@ -0,0 +1,9 @@
+{% liquid
+ graphql r = 'modules/openai/embeddings/create', args: object
+
+ assign object = r.embedding_create
+ hash_assign object['valid'] = true
+
+ return object
+%}
+
diff --git a/modules/openai/public/lib/commands/embeddings/delete.liquid b/modules/openai/public/lib/commands/embeddings/delete.liquid
new file mode 100644
index 0000000..27529b5
--- /dev/null
+++ b/modules/openai/public/lib/commands/embeddings/delete.liquid
@@ -0,0 +1,12 @@
+{% liquid
+
+ function object = 'modules/openai/commands/embeddings/delete/build', object: object, id: id
+ function object = 'modules/openai/commands/embeddings/delete/check', object: object
+
+ if object.valid
+ function object = 'modules/core/commands/execute', mutation_name: 'modules/openai/embeddings/delete', object: object, selection: 'embedding_delete'
+ endif
+
+ return object
+%}
+
diff --git a/modules/openai/public/lib/commands/embeddings/delete/build.liquid b/modules/openai/public/lib/commands/embeddings/delete/build.liquid
new file mode 100644
index 0000000..5315d48
--- /dev/null
+++ b/modules/openai/public/lib/commands/embeddings/delete/build.liquid
@@ -0,0 +1,7 @@
+{% liquid
+ assign id = id | default: object.id
+ assign embedding = '{}' | parse_json
+ hash_assign embedding['id'] = id
+
+ return embedding
+%}
diff --git a/modules/openai/public/lib/commands/embeddings/delete/check.liquid b/modules/openai/public/lib/commands/embeddings/delete/check.liquid
new file mode 100644
index 0000000..67e2c81
--- /dev/null
+++ b/modules/openai/public/lib/commands/embeddings/delete/check.liquid
@@ -0,0 +1,9 @@
+{% liquid
+ assign c = '{ "errors": {}, "valid": true }' | parse_json
+
+ function c = 'modules/core/validations/presence', c: c, object: object, field_name: 'id'
+
+ assign object = object | hash_merge: c
+
+ return object
+%}
diff --git a/modules/openai/public/lib/commands/embeddings/update.liquid b/modules/openai/public/lib/commands/embeddings/update.liquid
new file mode 100644
index 0000000..9b2425f
--- /dev/null
+++ b/modules/openai/public/lib/commands/embeddings/update.liquid
@@ -0,0 +1,11 @@
+{% liquid
+ function object = 'modules/openai/commands/embeddings/update/build', object: object, id: id
+ function object = 'modules/openai/commands/embeddings/update/check', object: object
+
+ if object.valid
+ function object = 'modules/openai/commands/embeddings/update/execute', object: object
+ endif
+
+ return object
+%}
+
diff --git a/modules/openai/public/lib/commands/embeddings/update/build.liquid b/modules/openai/public/lib/commands/embeddings/update/build.liquid
new file mode 100644
index 0000000..3d57b43
--- /dev/null
+++ b/modules/openai/public/lib/commands/embeddings/update/build.liquid
@@ -0,0 +1,13 @@
+{% liquid
+ assign id = id | default: object.id
+
+ assign embedding = '{}' | parse_json
+ hash_assign embedding['id'] = id
+
+ hash_assign embedding['embedding'] = object.embedding
+ hash_assign embedding['content'] = object.content
+ hash_assign embedding['metadata'] = object.metadata
+ hash_assign embedding['token_count'] = object.token_count
+
+ return embedding
+%}
diff --git a/modules/openai/public/lib/commands/embeddings/update/check.liquid b/modules/openai/public/lib/commands/embeddings/update/check.liquid
new file mode 100644
index 0000000..866cfd6
--- /dev/null
+++ b/modules/openai/public/lib/commands/embeddings/update/check.liquid
@@ -0,0 +1,12 @@
+{% liquid
+ assign c = '{ "errors": {}, "valid": true }' | parse_json
+
+ function c = 'modules/core/validations/presence', c: c, object: object, field_name: 'id'
+ function c = 'modules/core/validations/presence', c: c, object: object, field_name: 'embedding'
+ function c = 'modules/core/validations/length', c: c, object: object, field_name: 'embedding', is: 1536
+ function c = 'modules/core/validations/presence', c: c, object: object, field_name: 'content'
+
+ assign object = object | hash_merge: c
+
+ return object
+%}
diff --git a/modules/openai/public/lib/commands/embeddings/update/execute.liquid b/modules/openai/public/lib/commands/embeddings/update/execute.liquid
new file mode 100644
index 0000000..3909418
--- /dev/null
+++ b/modules/openai/public/lib/commands/embeddings/update/execute.liquid
@@ -0,0 +1,9 @@
+{% liquid
+ graphql r = 'modules/openai/embeddings/update', args: object
+
+ assign object = r.embedding_update
+ hash_assign object['valid'] = true
+
+ return object
+%}
+
diff --git a/modules/openai/public/lib/commands/openai/chat/build.liquid b/modules/openai/public/lib/commands/openai/chat/build.liquid
new file mode 100644
index 0000000..aed1ea7
--- /dev/null
+++ b/modules/openai/public/lib/commands/openai/chat/build.liquid
@@ -0,0 +1,105 @@
+{%- comment -%}
+Creates an OpenAI API request data with customizable parameters.
+object: JSON Object
+ This object contains the parameters for the OpenAI API request.
+ It can include model, temperature, system_message, user_message, user_images,
+ response_format_schema_json, and response_format_required_fields.
+
+object fields:
+ model: (optional) String
+ - The OpenAI model to use. Defaults to "gpt-4o-mini" if not provided.
+
+ temperature: (optional) Number
+ - Controls the randomness of the output. Defaults to 1 if not provided.
+
+ system_message: (optional) String
+ - The system message that sets the behavior of the AI.
+
+ user_message: (mandatory) String
+ - The main text prompt/message from the user.
+
+ user_images: (optional) Array of Strings
+ - Array of image URLs or Base64 to include in the prompt.
+ - Each URL will be added as an input_image type message.
+
+ response_format_schema_json: (optional) Object
+ - JSON schema that defines the structure of the expected response.
+ - Used to format the AI's response in a specific JSON structure.
+ - It's strongly recommended to provide this schema for structured outputs for predictive results.
+
+ response_format_required_fields: (optional) Array of Strings
+ - Specifies which fields in the response schema are required.
+ - If not provided, all fields in the schema will be considered required.
+
+Returns:
+ JSON Object: A formatted data ready for OpenAI API request
+
+See also: https://platform.openai.com/docs/api-reference/chat
+ https://platform.openai.com/docs/guides/structured-outputs
+
+{%- endcomment -%}
+{%- parse_json data_frame -%}
+{
+ "model": "gpt-4o-mini",
+ "temperature": 1,
+ "messages": [
+ {
+ "role": "system",
+ "content": ""
+ },
+ {
+ "role": "user",
+ "content": [
+ {
+ "type": "text",
+ "text": ""
+ }
+ ]
+ }
+ ],
+ "response_format": {
+ "type": "json_schema",
+ "json_schema": {
+ "name": "prompt_response_schema",
+ "schema": {
+ "type": "object",
+ "properties": {
+ },
+ "additionalProperties": false
+ }
+ }
+ }
+}
+{%- endparse_json -%}
+
+
+{%- liquid
+ hash_assign data_frame['model'] = object['model'] | default: "gpt-4o-mini"
+ hash_assign data_frame['temperature'] = object['temperature'] | default: 1
+ hash_assign data_frame['messages'][0]['content'] = object['system_message'] | default: ""
+ hash_assign data_frame['messages'][1]['content'][0]['text'] = object['user_message'] | default: ""
+
+ assign user_images = object['user_images'] | default: '[]' | parse_json
+
+ if user_images.size > 0
+ assign user_content = data_frame['messages'][1]['content']
+
+ for item in user_images
+ assign img_object = null | hash_merge: type: "image_url" | hash_merge: image_url: null
+ hash_assign img_object['image_url'] = null | hash_merge: url: item
+ assign user_content = user_content | array_add: img_object
+ endfor
+
+ hash_assign data_frame['messages'][1]['content'] = user_content
+ endif
+
+ assign response_format_schema_json = object['response_format_schema_json'] | default: null
+
+ if response_format_schema_json != blank
+ hash_assign data_frame['response_format']['json_schema']['schema']['properties'] = response_format_schema_json
+ else
+ hash_assign data_frame['response_format'] = null
+ endif
+
+ return data_frame
+-%}
diff --git a/modules/openai/public/lib/commands/openai/chat/check.liquid b/modules/openai/public/lib/commands/openai/chat/check.liquid
new file mode 100644
index 0000000..bec7854
--- /dev/null
+++ b/modules/openai/public/lib/commands/openai/chat/check.liquid
@@ -0,0 +1,10 @@
+{% liquid
+ assign c = '{ "errors": {}, "valid": true }' | parse_json
+
+ function c = 'modules/core/validations/presence', c: c, object: object.messages[1].content[0], field_name: 'text'
+
+ assign object = object | hash_merge: c
+
+ return object
+%}
+
diff --git a/modules/openai/public/lib/commands/openai/chat/completions.liquid b/modules/openai/public/lib/commands/openai/chat/completions.liquid
new file mode 100644
index 0000000..df2c910
--- /dev/null
+++ b/modules/openai/public/lib/commands/openai/chat/completions.liquid
@@ -0,0 +1,22 @@
+{%- liquid
+ function object = 'modules/openai/commands/openai/chat/build', object: object;
+ function checked_object = 'modules/openai/commands/openai/chat/check', object: object;
+
+ if checked_object.valid
+ graphql response = 'modules/core/api_calls/send', data: object, template: 'modules/openai/chat' | dig: 'api_call'
+
+ assign response_code = response.response.status
+
+ if response_code != 200
+ assign error_message = "ERROR: OpenAI API chat call failed with status code #" | append: response_code
+ log response.response.body, type: error_message
+ return null
+ else
+ assign response_body = response.response.body | parse_json
+ return response_body.choices[0].message.content | parse_json
+ endif
+ else
+ log checked_object, type: "ERROR: Request object is invalid"
+ return null
+ endif
+-%}
diff --git a/modules/openai/public/lib/commands/openai/fetch_embeddings.liquid b/modules/openai/public/lib/commands/openai/fetch_embeddings.liquid
new file mode 100644
index 0000000..4ec35e8
--- /dev/null
+++ b/modules/openai/public/lib/commands/openai/fetch_embeddings.liquid
@@ -0,0 +1,17 @@
+{% liquid
+
+ function object = 'modules/openai/commands/openai/fetch_embeddings/build', object: object
+ function object = 'modules/openai/commands/openai/fetch_embeddings/check', object: object
+
+ if object.valid
+ function object = 'modules/core/commands/execute', mutation_name: 'modules/core/api_calls/send', object: object, selection: 'api_call'
+ assign json_body = object.response.body | parse_json
+ if object.response.status != 200
+      assign err = 'modules/openai fetch embeddings error: ' | append: json_body.error.message
+ log err, type: 'ERROR'
+ endif
+
+ endif
+
+ return json_body
+%}
diff --git a/modules/openai/public/lib/commands/openai/fetch_embeddings/build.liquid b/modules/openai/public/lib/commands/openai/fetch_embeddings/build.liquid
new file mode 100644
index 0000000..8d263ff
--- /dev/null
+++ b/modules/openai/public/lib/commands/openai/fetch_embeddings/build.liquid
@@ -0,0 +1,33 @@
+{% parse_json request_object %}
+{% assign model = context.constants['modules/openai/OPENAI_MODEL'] | default: "text-embedding-ada-002" %}
+{% assign version = context.constants['modules/openai/OPENAI_VERSION'] | default: "v1" %}
+{
+  "template": "modules/core/generic",
+  "data": {
+    "to": "https://api.openai.com/{{ version }}/embeddings",
+    "headers": {
+      "Content-Type": "application/json",
+      "Authorization": "Bearer {{ context.constants['modules/openai/OPENAI_SECRET_TOKEN'] }}"
+    },
+    "request_type": "POST",
+    "payload": { "model": "{{ model }}" }
+  }
+}
+{% endparse_json %}
+
+{% liquid
+
+  if object != blank
+    assign arr = object
+    assign obj_type = object | type_of
+    if obj_type != 'Array'
+      assign arr = '[]' | parse_json | add_to_array: object
+    endif
+    hash_assign request_object['data']['payload']['input'] = arr | compact
+  else
+    log "modules/openai/fetch_embeddings - object is null, should be an array of strings that will be transformed to embeddings", type: 'ERROR'
+  endif
+
+
+  return request_object
+%}
diff --git a/modules/openai/public/lib/commands/openai/fetch_embeddings/check.liquid b/modules/openai/public/lib/commands/openai/fetch_embeddings/check.liquid
new file mode 100644
index 0000000..2a5d10c
--- /dev/null
+++ b/modules/openai/public/lib/commands/openai/fetch_embeddings/check.liquid
@@ -0,0 +1,10 @@
+{% liquid
+ assign c = '{ "errors": {}, "valid": true }' | parse_json
+
+ function c = 'modules/core/validations/presence', c: c, object: object.data.payload, field_name: 'input'
+
+ assign object = object | hash_merge: c
+
+ return object
+%}
+
diff --git a/modules/openai/public/lib/commands/openai/responses/build.liquid b/modules/openai/public/lib/commands/openai/responses/build.liquid
new file mode 100644
index 0000000..c4be056
--- /dev/null
+++ b/modules/openai/public/lib/commands/openai/responses/build.liquid
@@ -0,0 +1,115 @@
+{%- comment -%}
+Creates an OpenAI API request data with customizable parameters.
+object: JSON Object
+ This object contains the parameters for the OpenAI API request.
+ It can include model, temperature, system_message, user_message, user_images,
+ response_format_schema_json, and response_format_required_fields.
+
+object fields:
+ model: (optional) String
+ - The OpenAI model to use. Defaults to "gpt-4o-mini" if not provided.
+
+ temperature: (optional) Number
+ - Controls the randomness of the output. Defaults to 1 if not provided.
+
+ system_message: (optional) String
+ - The system message that sets the behavior of the AI.
+
+ user_message: (mandatory) String
+ - The main text prompt/message from the user.
+
+ user_images: (optional) Array of Strings
+ - Array of image URLs or Base64 to include in the prompt.
+ - Each URL will be added as an input_image type message.
+
+ response_format_schema_json: (optional) Object
+ - JSON schema that defines the structure of the expected response.
+ - Used to format the AI's response in a specific JSON structure.
+ - It's strongly recommended to provide this schema for structured outputs for predictive results.
+
+ response_format_required_fields: (optional) Array of Strings
+ - Specifies which fields in the response schema are required.
+ - If not provided, all fields in the schema will be considered required.
+
+Returns:
+ JSON Object: A formatted data ready for OpenAI API request
+
+See also: https://platform.openai.com/docs/api-reference/responses/create
+ https://platform.openai.com/docs/guides/structured-outputs?api-mode=responses
+ https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses
+ https://platform.openai.com/docs/api-reference/chat
+
+{%- endcomment -%}
+{%- parse_json data_frame -%}
+{
+ "model": "gpt-4o-mini",
+ "temperature": 1,
+ "input": [
+ {
+ "role": "system",
+ "content": ""
+ },
+ {
+ "role": "user",
+ "content": [
+ {
+ "type": "input_text",
+ "text": ""
+ }
+ ]
+ }
+ ],
+ "text": {
+ "format": {
+ "type": "json_schema",
+ "name": "prompt_response_schema",
+ "schema": {
+ "type": "object",
+ "properties": {
+ },
+ "additionalProperties": false,
+ "required": []
+ }
+ }
+ }
+}
+{%- endparse_json -%}
+
+
+{%- liquid
+ hash_assign data_frame['model'] = object['model'] | default: "gpt-4o-mini"
+ hash_assign data_frame['temperature'] = object['temperature'] | default: 1
+ hash_assign data_frame['input'][0]['content'] = object['system_message'] | default: ""
+ hash_assign data_frame['input'][1]['content'][0]['text'] = object['user_message'] | default: ""
+
+ assign user_images = object['user_images'] | default: '[]' | parse_json
+
+ if user_images.size > 0
+ assign user_content = data_frame['input'][1]['content']
+
+ for item in user_images
+ assign img_object = null | hash_merge: type: "input_image" | hash_merge: image_url: item
+ assign user_content = user_content | array_add: img_object
+ endfor
+
+ hash_assign data_frame['input'][1]['content'] = user_content
+ endif
+
+ assign response_format_schema_json = object['response_format_schema_json'] | default: null
+ assign response_format_required_fields = object['response_format_required_fields'] | default: null
+
+ if response_format_schema_json != blank
+ hash_assign data_frame['text']['format']['schema']['properties'] = response_format_schema_json
+
+ if response_format_required_fields == blank
+ assign required = data_frame['text']['format']['schema']['properties'] | hash_keys
+ hash_assign data_frame['text']['format']['schema']['required'] = required
+ else
+ hash_assign data_frame['text']['format']['schema']['required'] = response_format_required_fields
+ endif
+ else
+ hash_assign data_frame['text'] = null
+ endif
+
+ return data_frame
+-%}
diff --git a/modules/openai/public/lib/commands/openai/responses/check.liquid b/modules/openai/public/lib/commands/openai/responses/check.liquid
new file mode 100644
index 0000000..79fb1db
--- /dev/null
+++ b/modules/openai/public/lib/commands/openai/responses/check.liquid
@@ -0,0 +1,10 @@
+{% liquid
+ assign c = '{ "errors": {}, "valid": true }' | parse_json
+
+ function c = 'modules/core/validations/presence', c: c, object: object.input[1].content[0], field_name: 'text'
+
+ assign object = object | hash_merge: c
+
+ return object
+%}
+
diff --git a/modules/openai/public/lib/commands/openai/responses/create.liquid b/modules/openai/public/lib/commands/openai/responses/create.liquid
new file mode 100644
index 0000000..394f58b
--- /dev/null
+++ b/modules/openai/public/lib/commands/openai/responses/create.liquid
@@ -0,0 +1,22 @@
+{%- liquid
+ function object = 'modules/openai/commands/openai/responses/build', object: object;
+ function checked_object = 'modules/openai/commands/openai/responses/check', object: object;
+
+ if checked_object.valid
+ graphql response = 'modules/core/api_calls/send', data: object, template: 'modules/openai/responses' | dig: 'api_call'
+
+ assign response_code = response.response.status
+
+ if response_code != 200
+ assign error_message = "ERROR: OpenAI Responses API call failed with status code #" | append: response_code
+ log response.response.body, type: error_message
+ return null
+ else
+ assign response_body = response.response.body | parse_json
+ return response_body.output[0].content[0].text | parse_json
+ endif
+ else
+ log checked_object, type: "ERROR: Request object is invalid"
+ return null
+ endif
+-%}
diff --git a/modules/openai/public/lib/hooks/hook_module_info.liquid b/modules/openai/public/lib/hooks/hook_module_info.liquid
new file mode 100644
index 0000000..cb6782c
--- /dev/null
+++ b/modules/openai/public/lib/hooks/hook_module_info.liquid
@@ -0,0 +1,15 @@
+{% comment %}
+ Implements hook_module_info.
+{% endcomment %}
+{% parse_json info %}
+{
+ "name": "<%= &name =%>",
+ "machine_name": "<%= &machine_name =%>",
+ "type": "<%= &type =%>",
+ "version": "<%= &version =%>"
+}
+{% endparse_json %}
+
+{% liquid
+ return info
+%}
diff --git a/modules/openai/public/lib/queries/embeddings/search.liquid b/modules/openai/public/lib/queries/embeddings/search.liquid
new file mode 100644
index 0000000..cbdb01d
--- /dev/null
+++ b/modules/openai/public/lib/queries/embeddings/search.liquid
@@ -0,0 +1,7 @@
+{% liquid
+ assign page = page | to_positive_integer: 1
+
+ graphql r = 'modules/openai/embeddings/search', limit: limit, page: page, metadata: metadata, related_to: related_to
+
+ return r.embeddings
+%}
diff --git a/modules/openai/template-values.json b/modules/openai/template-values.json
new file mode 100644
index 0000000..de6d548
--- /dev/null
+++ b/modules/openai/template-values.json
@@ -0,0 +1,9 @@
+{
+ "name": "pos-module-openai",
+ "machine_name": "openai",
+ "type": "module",
+ "version": "1.1.0",
+ "dependencies": {
+ "core": "^2.0.0"
+ }
+}