diff --git a/openai-core/src/main/scala/io/cequence/openaiscala/domain/responsesapi/JsonFormats.scala b/openai-core/src/main/scala/io/cequence/openaiscala/domain/responsesapi/JsonFormats.scala
index 1e4e549e..90d4f6f0 100644
--- a/openai-core/src/main/scala/io/cequence/openaiscala/domain/responsesapi/JsonFormats.scala
+++ b/openai-core/src/main/scala/io/cequence/openaiscala/domain/responsesapi/JsonFormats.scala
@@ -231,6 +231,63 @@ object JsonFormats {
       outputContentMessageFormat.writes(outputMessage)
   }
 
+  implicit lazy val messageReads: Reads[Message] = Reads { json =>
+    (json \ "content").validate[JsValue] match {
+      case JsSuccess(JsString(_), _) =>
+        inputTextMessageFormat.reads(json)
+
+      case JsSuccess(JsArray(items), _) =>
+        items.headOption match {
+          case Some(first) =>
+            (first \ "type").validate[String] match {
+              case JsSuccess(tpe, _) if tpe == "output_text" || tpe == "refusal" =>
+                outputContentMessageFormat.reads(json)
+              case JsSuccess(tpe, _) if tpe == "input_text" || tpe == "input_image" || tpe == "input_file" =>
+                inputContentMessageFormat.reads(json)
+              case _ =>
+                JsError("Unknown message content array element type")
+            }
+          case None =>
+            inputContentMessageFormat.reads(json)
+        }
+
+      case JsSuccess(_, _) =>
+        JsError("Content must be a string or array")
+
+      case JsError(_) =>
+        JsError("Missing 'content' field for Message")
+    }
+  }
+
+  // Supports `instructions` as either a String or an array of InputMessageContent
+  implicit val instructionsReads: Reads[Seq[Message]] = Reads {
+    // The OpenAI docs specify `instructions` as a String
+    case JsString(str) =>
+      JsSuccess(Seq(
+        Message.InputContent(
+          Seq(InputMessageContent.Text(str)),
+          // No role is specified in this case, so `User` is used as a default
+          ChatRole.User
+        )
+      ))
+    // However, a response created from a reusable prompt returns an array of InputMessageContent
+    case JsArray(arr) =>
+      arr
+        .map(_.validate[Message])
+        .foldLeft(JsSuccess(Seq.empty[Message]): JsResult[Seq[Message]]) {
+          case (JsSuccess(acc, _), JsSuccess(m, _)) => JsSuccess(acc :+ m)
+          case (JsError(e1), JsError(e2)) => JsError(e1 ++ e2)
+          case (JsError(e), _) => JsError(e)
+          case (_, JsError(e)) => JsError(e)
+        }
+
+    case _ =>
+      JsError("instructions must be either a string or an array")
+  }
+
+  implicit lazy val messageFormat: Format[Message] = Format(messageReads, messageWrites)
+
+  // input hierarchy
 
   implicit lazy val inputWrites: Writes[Input] =
     Writes[Input] { (input: Input) =>
diff --git a/openai-core/src/main/scala/io/cequence/openaiscala/domain/responsesapi/Response.scala b/openai-core/src/main/scala/io/cequence/openaiscala/domain/responsesapi/Response.scala
index 0e98491d..55240f81 100644
--- a/openai-core/src/main/scala/io/cequence/openaiscala/domain/responsesapi/Response.scala
+++ b/openai-core/src/main/scala/io/cequence/openaiscala/domain/responsesapi/Response.scala
@@ -74,7 +74,7 @@ final case class Response(
   error: Option[ResponseError] = None,
   id: String,
   incompleteDetails: Option[IncompleteDetails] = None,
-  instructions: Option[String] = None,
+  instructions: Option[Seq[Message]] = None,
   maxOutputTokens: Option[Int] = None,
   metadata: Option[Map[String, String]] = None,
   model: String,
diff --git a/openai-core/src/test/scala/io/cequence/openaiscala/domain/responsesapi/JsonFormatsSpecs.scala b/openai-core/src/test/scala/io/cequence/openaiscala/domain/responsesapi/JsonFormatsSpecs.scala
index afd33584..a0e7006c 100644
--- a/openai-core/src/test/scala/io/cequence/openaiscala/domain/responsesapi/JsonFormatsSpecs.scala
+++ b/openai-core/src/test/scala/io/cequence/openaiscala/domain/responsesapi/JsonFormatsSpecs.scala
@@ -1463,7 +1463,11 @@ class JsonFormatsSpecs extends AnyWordSpecLike with Matchers {
       error = None,
       id = "resp_abc123",
       incompleteDetails = None,
-      instructions = Some("Act as a helpful assistant"),
+      instructions = Some(Seq(
+        Message.InputContent(Seq(
+          InputMessageContent.Text("Act as a helpful assistant")
+        ), ChatRole.User)
+      )),
       maxOutputTokens = Some(1000),
       metadata = Some(Map("key1" -> "value1", "key2" -> "value2")),
       model = "gpt-4o",
@@ -1739,6 +1743,191 @@ class JsonFormatsSpecs extends AnyWordSpecLike with Matchers {
       Pretty,
       justSemantics = true
     )
+
+    // Response with instructions as Array (from Reusable prompt response)
+    val responseWithInstructionsAsArray = response.copy(
+      instructions = Some(Seq(
+        Message.InputContent(Seq(InputMessageContent.Text("Act as a helpful assistant")), ChatRole.System),
+        Message.InputContent(Seq(InputMessageContent.File(fileId = Some("fileId"))), ChatRole.User)
+      ))
+    )
+
+    testCodec[Response](
+      responseWithInstructionsAsArray,
+      """{
+        |  "created_at" : 1620000000,
+        |  "id" : "resp_abc123",
+        |  "instructions" : [
+        |    {
+        |      "type" : "message",
+        |      "content" : [ {
+        |        "type" : "input_text",
+        |        "text" : "Act as a helpful assistant"
+        |      } ],
+        |      "role" : "system"
+        |    }
+        |    ,
+        |    {
+        |      "type" : "message",
+        |      "content" : [ {
+        |        "type" : "input_file",
+        |        "file_id" : "fileId"
+        |      } ],
+        |      "role" : "user"
+        |    }
+        |  ],
+        |  "max_output_tokens" : 1000,
+        |  "metadata" : {
+        |    "key1" : "value1",
+        |    "key2" : "value2"
+        |  },
+        |  "model" : "gpt-4o",
+        |  "output" : [ {
+        |    "content" : [ {
+        |      "annotations" : [ ],
+        |      "text" : "Hello, how can I help you today?",
+        |      "type" : "output_text"
+        |    } ],
+        |    "id" : "output_def456",
+        |    "status" : "completed",
+        |    "type" : "message"
+        |  } ],
+        |  "parallel_tool_calls" : true,
+        |  "reasoning" : {
+        |    "effort" : "medium"
+        |  },
+        |  "status" : "completed",
+        |  "temperature" : 0.7,
+        |  "text" : {
+        |    "format" : {
+        |      "type" : "text"
+        |    }
+        |  },
+        |  "tool_choice" : "auto",
+        |  "tools" : [ {
+        |    "name" : "get_weather",
+        |    "parameters" : {
+        |      "properties" : {
+        |        "location" : {
+        |          "description" : "The city and state, e.g. San Francisco, CA",
+        |          "type" : "string"
+        |        }
+        |      },
+        |      "required" : [ "location" ],
+        |      "type" : "object"
+        |    },
+        |    "strict" : false,
+        |    "description" : "Get the current weather for a location",
+        |    "type" : "function"
+        |  } ],
+        |  "top_p" : 0.9,
+        |  "truncation" : "auto",
+        |  "usage" : {
+        |    "input_tokens" : 50,
+        |    "input_tokens_details" : {
+        |      "cached_tokens" : 50
+        |    },
+        |    "output_tokens" : 150,
+        |    "output_tokens_details" : {
+        |      "reasoning_tokens" : 75
+        |    },
+        |    "total_tokens" : 200
+        |  },
+        |  "user" : "user123"
+        |}""".stripMargin,
+      Pretty
+    )
+
+    // Response with instructions as an array containing a message with empty content (a reusable prompt returns this as empty if left unfilled)
+    val responseWithInstructionsAsArrayAndEmptyContent = response.copy(
+      instructions = Some(Seq(
+        Message.InputContent(Seq(InputMessageContent.Text("Act as a helpful assistant")), ChatRole.System),
+        Message.InputContent(Seq.empty, ChatRole.User)
+      ))
+    )
+
+    testCodec[Response](
+      responseWithInstructionsAsArrayAndEmptyContent,
+      """{
+        |  "created_at" : 1620000000,
+        |  "id" : "resp_abc123",
+        |  "instructions" : [
+        |    {
+        |      "type" : "message",
+        |      "content" : [ {
+        |        "type" : "input_text",
+        |        "text" : "Act as a helpful assistant"
+        |      } ],
+        |      "role" : "system"
+        |    }
+        |    ,
+        |    {
+        |      "type" : "message",
+        |      "content" : [],
+        |      "role" : "user"
+        |    }
+        |  ],
+        |  "max_output_tokens" : 1000,
+        |  "metadata" : {
+        |    "key1" : "value1",
+        |    "key2" : "value2"
+        |  },
+        |  "model" : "gpt-4o",
+        |  "output" : [ {
+        |    "content" : [ {
+        |      "annotations" : [ ],
+        |      "text" : "Hello, how can I help you today?",
+        |      "type" : "output_text"
+        |    } ],
+        |    "id" : "output_def456",
+        |    "status" : "completed",
+        |    "type" : "message"
+        |  } ],
+        |  "parallel_tool_calls" : true,
+        |  "reasoning" : {
+        |    "effort" : "medium"
+        |  },
+        |  "status" : "completed",
+        |  "temperature" : 0.7,
+        |  "text" : {
+        |    "format" : {
+        |      "type" : "text"
+        |    }
+        |  },
+        |  "tool_choice" : "auto",
+        |  "tools" : [ {
+        |    "name" : "get_weather",
+        |    "parameters" : {
+        |      "properties" : {
+        |        "location" : {
+        |          "description" : "The city and state, e.g. San Francisco, CA",
+        |          "type" : "string"
+        |        }
+        |      },
+        |      "required" : [ "location" ],
+        |      "type" : "object"
+        |    },
+        |    "strict" : false,
+        |    "description" : "Get the current weather for a location",
+        |    "type" : "function"
+        |  } ],
+        |  "top_p" : 0.9,
+        |  "truncation" : "auto",
+        |  "usage" : {
+        |    "input_tokens" : 50,
+        |    "input_tokens_details" : {
+        |      "cached_tokens" : 50
+        |    },
+        |    "output_tokens" : 150,
+        |    "output_tokens_details" : {
+        |      "reasoning_tokens" : 75
+        |    },
+        |    "total_tokens" : 200
+        |  },
+        |  "user" : "user123"
+        |}""".stripMargin,
+      Pretty
+    )
   }
 
   "serialize and deserialize CreateModelResponse" in {
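For reference, a minimal illustrative sketch (not part of the patch) of how the new `instructions` Reads behaves with both accepted shapes. It assumes Play JSON and the `JsonFormats` object from this diff are on the classpath; the demo object name is invented for illustration.

import play.api.libs.json.{JsString, Json}
import io.cequence.openaiscala.domain.responsesapi.JsonFormats

object InstructionsReadsDemo extends App {
  // `instructions` given as a plain string, as documented by OpenAI
  val fromString =
    JsString("Act as a helpful assistant").validate(JsonFormats.instructionsReads)

  // `instructions` given as an array of input messages, as returned for a reusable prompt
  val fromArray = Json
    .parse(
      """[ {
        |  "type" : "message",
        |  "content" : [ { "type" : "input_text", "text" : "Act as a helpful assistant" } ],
        |  "role" : "system"
        |} ]""".stripMargin
    )
    .validate(JsonFormats.instructionsReads)

  // Both forms should yield a JsSuccess wrapping a Seq of Message.InputContent
  println(fromString)
  println(fromArray)
}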