diff --git a/Dockerfile b/Dockerfile index 53545af..c00842e 100644 --- a/Dockerfile +++ b/Dockerfile @@ -12,12 +12,4 @@ ENV FUNCTION_INTERFACE="file" ENV LOG_LEVEL="DEBUG" # Copy the evaluation function to the app directory -COPY ./evaluation_function.wl /app/evaluation_function.wl - -RUN apt-get update && apt-get install -y \ - libglib2.0-0 \ - && rm -rf /var/lib/apt/lists/* - -COPY --from=wolframresearch/wolframengine:12.3.1 \ - /usr/local/Wolfram/WolframEngine/12.3/SystemFiles/Libraries/Linux-x86-64/libiomp5.so \ - /usr/local/Wolfram/WolframEngine/13.3/SystemFiles/Libraries/Linux-x86-64/libiomp5.so \ No newline at end of file +COPY ./evaluation_function.wl /app/evaluation_function.wl \ No newline at end of file diff --git a/README.md b/README.md index c5a4e7c..71b1128 100644 --- a/README.md +++ b/README.md @@ -29,14 +29,14 @@ In[1] := $PasswordFile // FilePrint 1e1d781ed0a3 6520-03713-97466 4304-2718-2K5ATR 5095-179-696:2,0,8,8:80001:20190627 ``` -This gives you a password that you can copy to a `mathpass` file on your host machine. +This gives you a password that you can copy to a `mathpass` file on your host machine. It is recommended to store it in the local `Licensing` directory. -**4. Run the Wolfram Engine container** +**4. Run the Evaluation Function container** Run the following command to start the Wolfram Engine container with the license: ```bash -docker run -it --rm -v $(pwd)/mathpass:/home/wolframengine/.WolframEngine/Licensing/mathpass wolframresearch/wolframengine +docker run -it --rm -v $(pwd)/Licensing:/home/wolframengine/.WolframEngine/Licensing/ {evaluation_function_tag} ``` -This command assumes that you have a `mathpass` file in the current directory, and the container is started with the `wolframengine` user. +This command assumes that you have a `mathpass` file in the Licensing directory, and the container is started with the `wolframengine` user. 
diff --git a/evaluation_function.wl b/evaluation_function.wl index 024c90a..eca7a08 100644 --- a/evaluation_function.wl +++ b/evaluation_function.wl @@ -2,42 +2,43 @@ including to within a given tolerance; input and output as Associations *) -equalQAssociation = - Function[ - Module[{tolerance, correctQ, error}, - If[NumericQ[#answer], - tolerance = - If[#params["tolerance_is_absolute"], - #params["tolerance"] - , - #params["tolerance"] * #params["answer"] - ]; - error = Abs[#answer - #response]; - correctQ = TrueQ[error <= tolerance] - , - error = "not applicable"; - correctQ = TrueQ[#answer == #response] - ]; - <| - "command" -> "eval" - , - "result" -> - { - "is_correct" -> correctQ - , - "feedback" -> - If[correctQ, - #params["correct_response_feedback"] - , - #params["incorrect_response_feedback" - ] - ] - , - "error" -> error - } - |> - ] - ]; +equalQAssociation = + Function[input, + Module[{data, tolerance, correctQ, error, answer, response, params, feedback}, + (*Get the evaluation parameters from the incoming request*) + data = input["params"]; + answer = data["answer"]; + response = data["response"]; + params = data["params"]; + + If[NumericQ[answer], + tolerance = + If[TrueQ[params["tolerance_is_absolute"]], + params["tolerance"], + params["tolerance"] * answer + ]; + error = Abs[answer - response]; + correctQ = TrueQ[error <= tolerance], + error = "not applicable"; + correctQ = TrueQ[answer == response] + ]; + + feedback = + If[correctQ, + params["correct_response_feedback"], + params["incorrect_response_feedback"] + ]; + + <| + "command" -> "eval", + "result" -> <| + "is_correct" -> correctQ, + "feedback" -> feedback + |> + |> + ] + ]; + (* A function to test whether a response is equal to an answer, \ including to within a given tolerance; input and output as @@ -57,6 +58,6 @@ Calls equalQAssociation *) equalQIO = Function[Export[#2, equalQAssociation[Import[#1, "JSON"] //. 
List :> Association], "JSON", "Compact" -> True]]; -argv = Rest[$ScriptCommandLine] +argv = Rest[$ScriptCommandLine]; equalQIO[argv[[1]], argv[[2]]] diff --git a/input.json b/input.json new file mode 100644 index 0000000..31add5a --- /dev/null +++ b/input.json @@ -0,0 +1,13 @@ +{ + "method": "eval", + "params": { + "answer": 1, + "response": 1, + "params": { + "tolerance": 0.01, + "tolerance_is_absolute": true, + "correct_response_feedback": "Correct!", + "incorrect_response_feedback": "Try again." + } + } +}