From a3ed6a3fc0fa668fa3f5d0cdb5d355b870a8c082 Mon Sep 17 00:00:00 2001 From: Muennighoff Date: Fri, 22 Jul 2022 19:37:14 +0200 Subject: [PATCH 1/4] Add cp complexity --- promptsource/templates.py | 16 ++++++- .../codeparrot--codecomplex/templates.yaml | 45 +++++++++++++++++++ 2 files changed, 60 insertions(+), 1 deletion(-) create mode 100644 promptsource/templates/codeparrot/codecomplex/codeparrot--codecomplex/templates.yaml diff --git a/promptsource/templates.py b/promptsource/templates.py index 2b9eb341e..943f8611e 100644 --- a/promptsource/templates.py +++ b/promptsource/templates.py @@ -27,7 +27,21 @@ # These are users whose datasets should be included in the results returned by # filter_english_datasets (regardless of their metadata) -INCLUDED_USERS = {"Zaid", "craffel", "GEM", "aps", "khalidalt", "shanya", "rbawden", "BigScienceBiasEval", "gsarti"} +INCLUDED_USERS = { + "Zaid", + "craffel", + "GEM", + "aps", + "khalidalt", + "shanya", + "rbawden", + "BigScienceBiasEval", + "gsarti", + "Helsinki-NLP", + "Muennighoff", + "facebook", + "codeparrot", +} # These are the metrics with which templates can be tagged METRICS = { diff --git a/promptsource/templates/codeparrot/codecomplex/codeparrot--codecomplex/templates.yaml b/promptsource/templates/codeparrot/codecomplex/codeparrot--codecomplex/templates.yaml new file mode 100644 index 000000000..0d044c303 --- /dev/null +++ b/promptsource/templates/codeparrot/codecomplex/codeparrot--codecomplex/templates.yaml @@ -0,0 +1,45 @@ +dataset: codeparrot/codecomplex +subset: codeparrot--codecomplex +templates: + 5b108b1c-7514-488f-99ed-3ca5da70e103: !Template + answer_choices: null + id: 5b108b1c-7514-488f-99ed-3ca5da70e103 + jinja: '{{ code }} + What is the complexity of the previous code? + ||| + {{ complexity }}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - en + metrics: + - Other + original_task: true + name: what + reference: '' + 1d85c898-70fe-4a51-be37-5111be357762: !Template + answer_choices: null + id: 1d85c898-70fe-4a51-be37-5111be357762 + jinja: "Identify the complexity of the following code as constant, linear, quadratic, cubic, log(n), nlog(n) or NP-hard. {{ code }} |||{{ complexity }}" + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - en + metrics: + - Other + original_task: false + name: identify + reference: '' + 5d85c898-70fe-4a51-be37-5111be357762: !Template + answer_choices: null + id: 5d85c898-70fe-4a51-be37-5111be357762 + jinja: "{{ code }} Which one is the correct complexity of the code snippet: constant, linear, quadratic, cubic, log(n), nlog(n) or NP-hard? 
|||{{ complexity }}" + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - en + metrics: + - Other + original_task: false + name: identify + reference: '' From 0b99615616b80932b8a2bd350e38ab27a438e077 Mon Sep 17 00:00:00 2001 From: Muennighoff Date: Fri, 22 Jul 2022 19:38:53 +0200 Subject: [PATCH 2/4] Adapt names --- .../codecomplex/codeparrot--codecomplex/templates.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/promptsource/templates/codeparrot/codecomplex/codeparrot--codecomplex/templates.yaml b/promptsource/templates/codeparrot/codecomplex/codeparrot--codecomplex/templates.yaml index 0d044c303..5eed0aed5 100644 --- a/promptsource/templates/codeparrot/codecomplex/codeparrot--codecomplex/templates.yaml +++ b/promptsource/templates/codeparrot/codecomplex/codeparrot--codecomplex/templates.yaml @@ -15,7 +15,7 @@ templates: metrics: - Other original_task: true - name: what + name: whatcomplexity reference: '' 1d85c898-70fe-4a51-be37-5111be357762: !Template answer_choices: null @@ -28,7 +28,7 @@ templates: metrics: - Other original_task: false - name: identify + name: identifycomplexity reference: '' 5d85c898-70fe-4a51-be37-5111be357762: !Template answer_choices: null @@ -41,5 +41,5 @@ templates: metrics: - Other original_task: false - name: identify + name: whichcomplexity reference: '' From ae7f431efab7f5ef35afa7ab94a53186684e07f4 Mon Sep 17 00:00:00 2001 From: Muennighoff Date: Thu, 28 Jul 2022 15:46:44 +0200 Subject: [PATCH 3/4] complexity -> time complexity --- .../codecomplex/codeparrot--codecomplex/templates.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/promptsource/templates/codeparrot/codecomplex/codeparrot--codecomplex/templates.yaml b/promptsource/templates/codeparrot/codecomplex/codeparrot--codecomplex/templates.yaml index 5eed0aed5..c3280d715 100644 --- a/promptsource/templates/codeparrot/codecomplex/codeparrot--codecomplex/templates.yaml +++ b/promptsource/templates/codeparrot/codecomplex/codeparrot--codecomplex/templates.yaml @@ -5,7 +5,7 @@ templates: answer_choices: null id: 5b108b1c-7514-488f-99ed-3ca5da70e103 jinja: '{{ code }} - What is the complexity of the previous code? + What is the time complexity of the previous code? ||| {{ complexity }}' metadata: !TemplateMetadata @@ -20,7 +20,7 @@ templates: 1d85c898-70fe-4a51-be37-5111be357762: !Template answer_choices: null id: 1d85c898-70fe-4a51-be37-5111be357762 - jinja: "Identify the complexity of the following code as constant, linear, quadratic, cubic, log(n), nlog(n) or NP-hard. {{ code }} |||{{ complexity }}" + jinja: "Identify the time complexity of the following code as constant, linear, quadratic, cubic, log(n), nlog(n) or NP-hard. {{ code }} Complexity: |||{{ complexity }}" metadata: !TemplateMetadata choices_in_prompt: false languages: @@ -33,7 +33,7 @@ templates: 5d85c898-70fe-4a51-be37-5111be357762: !Template answer_choices: null id: 5d85c898-70fe-4a51-be37-5111be357762 - jinja: "{{ code }} Which one is the correct complexity of the code snippet: constant, linear, quadratic, cubic, log(n), nlog(n) or NP-hard? |||{{ complexity }}" + jinja: "{{ code }} Which one is the correct time complexity of the code snippet: constant, linear, quadratic, cubic, log(n), nlog(n) or NP-hard? 
|||{{ complexity }}" metadata: !TemplateMetadata choices_in_prompt: false languages: From 27f6a43a203f23c33337ecc2362e9834085700e3 Mon Sep 17 00:00:00 2001 From: Muennighoff Date: Sat, 30 Jul 2022 10:34:31 +0200 Subject: [PATCH 4/4] Add clue @07e1c20b2f4e8ac1af98d9a7a9cf3d05f007f36d --- .../templates/clue/afqmc/templates.yaml | 69 ++++++++++++++ promptsource/templates/clue/c3/templates.yaml | 82 +++++++++++++++++ .../templates/clue/cluewsc2020/templates.yaml | 78 ++++++++++++++++ .../templates/clue/cmrc2018/templates.yaml | 76 ++++++++++++++++ .../templates/clue/csl/templates.yaml | 90 +++++++++++++++++++ .../templates/clue/drcd/templates.yaml | 76 ++++++++++++++++ .../templates/clue/tnews/templates.yaml | 62 +++++++++++++ 7 files changed, 533 insertions(+) create mode 100644 promptsource/templates/clue/afqmc/templates.yaml create mode 100644 promptsource/templates/clue/c3/templates.yaml create mode 100644 promptsource/templates/clue/cluewsc2020/templates.yaml create mode 100644 promptsource/templates/clue/cmrc2018/templates.yaml create mode 100644 promptsource/templates/clue/csl/templates.yaml create mode 100644 promptsource/templates/clue/drcd/templates.yaml create mode 100644 promptsource/templates/clue/tnews/templates.yaml diff --git a/promptsource/templates/clue/afqmc/templates.yaml b/promptsource/templates/clue/afqmc/templates.yaml new file mode 100644 index 000000000..6498849d5 --- /dev/null +++ b/promptsource/templates/clue/afqmc/templates.yaml @@ -0,0 +1,69 @@ +dataset: clue +subset: afqmc +templates: + 997437fd-6888-482d-95e9-ffd867b497ee: !Template + answer_choices: no ||| yes + id: 997437fd-6888-482d-95e9-ffd867b497ee + jinja: 'Do "{{ sentence1 }}" and "{{ sentence2 }}" express the same thing? + + ||| + + {{ answer_choices[label] }}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - zh + metrics: + - Accuracy + original_task: true + name: express_same_yes_no + reference: '' + a28370c0-d43b-405c-a9b1-4d77b3a27244: !Template + answer_choices: no ||| yes + id: a28370c0-d43b-405c-a9b1-4d77b3a27244 + jinja: "\"{{ sentence1 }}\" and \"{{ sentence2 }}\" have the same meaning. Would\ + \ you agree? Answer yes or no. \n|||\n{{ answer_choices[label] }}" + metadata: !TemplateMetadata + choices_in_prompt: true + languages: + - zh + metrics: + - Accuracy + original_task: true + name: same_meaning_agree + reference: '' + d8c303a6-61a4-47f9-8623-cc72cc3294eb: !Template + answer_choices: null + id: d8c303a6-61a4-47f9-8623-cc72cc3294eb + jinja: 'Generate another sentence that has the same meaning as "{{ sentence1 }}". + + ||| + + {% if label == 1 %} + + {{ sentence2}} + + {% endif %}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - zh + metrics: + - ROUGE + original_task: false + name: generate_similar_sentence + reference: '' + e3fcaefd-4e8e-4491-aab7-8efeb67a2909: !Template + answer_choices: no ||| yes + id: e3fcaefd-4e8e-4491-aab7-8efeb67a2909 + jinja: "Sentence 1: {{ sentence1 }}\nSentence 2: {{ sentence2 }}\nAre the two\ + \ sentences similar? Yes or no? 
\n|||\n{{ answer_choices[label] }}" + metadata: !TemplateMetadata + choices_in_prompt: true + languages: + - zh + metrics: + - Accuracy + original_task: true + name: is_similar_yes_no + reference: '' diff --git a/promptsource/templates/clue/c3/templates.yaml b/promptsource/templates/clue/c3/templates.yaml new file mode 100644 index 000000000..7856a1a1f --- /dev/null +++ b/promptsource/templates/clue/c3/templates.yaml @@ -0,0 +1,82 @@ +dataset: clue +subset: c3 +templates: + 51b3c3fe-2fa2-474a-81f9-5b421c884109: !Template + answer_choices: '{{ choice | join(" ||| ") }}' + id: 51b3c3fe-2fa2-474a-81f9-5b421c884109 + jinja: "{% for statement in context %} \n{{ statement }}\n{% endfor %}\nGiven\ + \ the dialogue / passage above, use the following options to answer the question\ + \ \"{{question}}\".\nOptions: \n- {{ answer_choices | join('\\n- ') }}\n|||\n\ + {{ answer }}" + metadata: !TemplateMetadata + choices_in_prompt: true + languages: + - zh + metrics: + - Accuracy + original_task: true + name: answer-question-affirmative + reference: '' + 5e06f05f-d7dd-4329-b6d8-3a62dcdba838: !Template + answer_choices: '{{ choice | join(" ||| ") }}' + id: 5e06f05f-d7dd-4329-b6d8-3a62dcdba838 + jinja: "Passage: {% for statement in context %} \n{{ statement }}\n{% endfor %}\n\ + Question: \"{{question}}\"\nAnswer choices: {{ answer_choices[:-1] | join(',\ + \ ') }}, or {{ answer_choices[-1] }}?\n|||\n{{ answer }}" + metadata: !TemplateMetadata + choices_in_prompt: true + languages: + - zh + metrics: + - Accuracy + original_task: true + name: question_choices_context + reference: '' + 63b5e5df-40d3-47ee-b77e-bf385c042fa9: !Template + answer_choices: null + id: 63b5e5df-40d3-47ee-b77e-bf385c042fa9 + jinja: "Passage: {% for statement in context %} \n{{ statement }}\n{% endfor %}\n\ + What kind of question would elicit an answer response of {{ answer }}?\n|||\n\ + {{ question }}" + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - zh + metrics: + - ROUGE + original_task: false + name: generate_question + reference: '' + a5820d05-a8df-4e31-a284-6969e478174b: !Template + answer_choices: '{{ choice | join('' ||| '') }}' + id: a5820d05-a8df-4e31-a284-6969e478174b + jinja: "{% for statement in context %} \n{{ statement }}\n{% endfor %}\nGiven\ + \ the dialogue / passage above, what is the answer for the question \"{{question}}\"\ + \nAnswer choices: {{ answer_choices[:-1] | join(', ') }}, or {{ answer_choices[-1]\ + \ }}?\n|||\n{{ answer }}" + metadata: !TemplateMetadata + choices_in_prompt: true + languages: + - zh + metrics: + - Accuracy + original_task: true + name: answer-question-interrogative + reference: '' + f15acc3f-e067-488f-b426-f65aa604da55: !Template + answer_choices: null + id: f15acc3f-e067-488f-b426-f65aa604da55 + jinja: "{% for statement in context %} \n{{ statement }}\n{% endfor %}\nGiven\ + \ the dialogue / passage above, what is the answer for the question \"{{question}}\"\ + \n|||\n{{ answer }}" + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - zh + metrics: + - ROUGE + - BLEU + - Other + original_task: false + name: answer-question-interrogative-no-choices + reference: '' diff --git a/promptsource/templates/clue/cluewsc2020/templates.yaml b/promptsource/templates/clue/cluewsc2020/templates.yaml new file mode 100644 index 000000000..4b22f666c --- /dev/null +++ b/promptsource/templates/clue/cluewsc2020/templates.yaml @@ -0,0 +1,78 @@ +dataset: clue +subset: cluewsc2020 +templates: + 321f55bb-c725-4fbf-bb7e-d46ea2f510b8: !Template + answer_choices: 
correct ||| wrong + id: 321f55bb-c725-4fbf-bb7e-d46ea2f510b8 + jinja: 'In the class, a teacher asks what the word "{{ target[''span2_text''] + }}" refers to in the text of "{{ text }}". The student answers "{{ target[''span1_text''] + }}". The teacher would say what? {{ answer_choices[0] | capitalize }} or {{answer_choices[1] + }}? + + ||| + + {{ answer_choices[label] }}' + metadata: !TemplateMetadata + choices_in_prompt: true + languages: + - zh + metrics: + - Accuracy + original_task: true + name: teacher_asking_student + reference: '' + 7282b4b5-f854-42af-8e75-d509608d97bb: !Template + answer_choices: null + id: 7282b4b5-f854-42af-8e75-d509608d97bb + jinja: 'What does the word "{{ target[''span2_text''] }}" refers to in the text + of "{{ text }}"? + + ||| + + {% if label == 0 %} + + {{ target[''span1_text''] }} + + {% endif %}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - zh + metrics: + - ROUGE + original_task: false + name: generate_correct_response + reference: '' + e649a609-f7b2-43da-800d-a32090e92221: !Template + answer_choices: yes ||| no + id: e649a609-f7b2-43da-800d-a32090e92221 + jinja: "In the sentence \"{{ text }}\", does \"{{ target['span2_text'] }}\" refer\ + \ to \"{{ target['span1_text'] }}\"? \n|||\n{{ answer_choices[label] }}" + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - zh + metrics: + - Accuracy + original_task: true + name: are_they_same + reference: '' + fc436a38-d9f5-4d17-bcf8-1e506bba5681: !Template + answer_choices: yes ||| no + id: fc436a38-d9f5-4d17-bcf8-1e506bba5681 + jinja: 'In the sentence "{{ text }}", the word "{{ target[''span2_text''] }}" + refers to "{{ target[''span1_text''] }}". Answer {{ answer_choices[0] }} if + you agree; otherwise, answer {{ answer_choices[1] }}. + + ||| + + {{ answer_choices[label] }}' + metadata: !TemplateMetadata + choices_in_prompt: true + languages: + - zh + metrics: + - Accuracy + original_task: true + name: affirmative_are_they_same + reference: '' diff --git a/promptsource/templates/clue/cmrc2018/templates.yaml b/promptsource/templates/clue/cmrc2018/templates.yaml new file mode 100644 index 000000000..5d2d6b2d5 --- /dev/null +++ b/promptsource/templates/clue/cmrc2018/templates.yaml @@ -0,0 +1,76 @@ +dataset: clue +subset: cmrc2018 +templates: + 3bba02e6-9266-418b-9ba0-4f71755cf3b6: !Template + answer_choices: null + id: 3bba02e6-9266-418b-9ba0-4f71755cf3b6 + jinja: 'Given this context "{{ context }}", generate a question that would return + the answer of "{{ answers[''text''][0] }}". + + ||| + + {{ question }} ' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - zh + metrics: + - ROUGE + original_task: false + name: generate_question + reference: '' + 8fe02215-7881-4a61-a6e7-579680e40b9b: !Template + answer_choices: null + id: 8fe02215-7881-4a61-a6e7-579680e40b9b + jinja: "In an exam, you are asked {{ question }}, and you are tasked to find the\ + \ answer from the following passage. \n{{ context }}\nWhat's the answer?\n|||\n\ + {{ answers['text'][0] }}" + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - zh + metrics: + - Squad + original_task: true + name: in_an_exam + reference: '' + 9e82f5da-b206-4758-94e6-085cf2608378: !Template + answer_choices: null + id: 9e82f5da-b206-4758-94e6-085cf2608378 + jinja: '{{ context }} + + The answer to {{ question }} is in the passage above. What is it? 
+ + ||| + + {{ answers[''text''][0] }}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - zh + metrics: + - Squad + original_task: true + name: answer_in_the_passage + reference: '' + 9fb15385-814e-419a-b862-2d4e06a58ef6: !Template + answer_choices: null + id: 9fb15385-814e-419a-b862-2d4e06a58ef6 + jinja: 'Answer the question using the given context. + + Question: {{ question }} + + Context: {{ context }} + + Answer: ||| + + {{ answers[''text''][0] }}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - zh + metrics: + - Squad + original_task: true + name: answer_following_question + reference: '' diff --git a/promptsource/templates/clue/csl/templates.yaml b/promptsource/templates/clue/csl/templates.yaml new file mode 100644 index 000000000..2ba03f7da --- /dev/null +++ b/promptsource/templates/clue/csl/templates.yaml @@ -0,0 +1,90 @@ +dataset: clue +subset: csl +templates: + 219679f8-a02f-4ee3-91c7-9ed4726dd828: !Template + answer_choices: no ||| yes + id: 219679f8-a02f-4ee3-91c7-9ed4726dd828 + jinja: 'After John wrote the abstract "{{abst}}", he wrote these keywords "{{ + keyword | join('', '') }}". Do you think his choice of keywords was correct? + Answer {{ answer_choices[1]}} or {{ answer_choices[0]}}. + + ||| + + {{ answer_choices[label] }} + + + ' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - zh + metrics: + - Accuracy + original_task: true + name: write_keywords_after_abstract + reference: '' + 2e851dd2-2677-415a-ad90-5d885aa91fdc: !Template + answer_choices: no ||| yes + id: 2e851dd2-2677-415a-ad90-5d885aa91fdc + jinja: 'Do these keywords "{{ keyword | join('', '') }}" represent key concepts + in the abstract "{{ abst }}"? + + ||| + + {{ answer_choices[label] }} + + + ' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - zh + metrics: + - Accuracy + original_task: true + name: do_represent + reference: '' + 2fa6151b-a296-4cd5-83ca-dcd434b831be: !Template + answer_choices: null + id: 2fa6151b-a296-4cd5-83ca-dcd434b831be + jinja: 'Given the abstract {{abst}}, list out {{ keyword | length }} keywords + for it. + + ||| + + {% if label == 1 %} + + {{ keyword | join('', '') }} + + {% endif %}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - zh + metrics: + - ROUGE + original_task: false + name: generate_keywords + reference: '' + aaf47f6f-fd8f-4180-8d85-e4c7df088ac6: !Template + answer_choices: no ||| yes + id: aaf47f6f-fd8f-4180-8d85-e4c7df088ac6 + jinja: 'A scholar used "{{ keyword | join('', '') }}" as search terms. Do you + think the search engine would return the abstract "{{abst}}"? Answer {{ answer_choices[1] + }} or {{ answer_choices[0] }}. + + ||| + + {{ answer_choices[label] }} + + + ' + metadata: !TemplateMetadata + choices_in_prompt: true + languages: + - zh + metrics: + - Accuracy + original_task: true + name: search_terms + reference: '' diff --git a/promptsource/templates/clue/drcd/templates.yaml b/promptsource/templates/clue/drcd/templates.yaml new file mode 100644 index 000000000..87d58e134 --- /dev/null +++ b/promptsource/templates/clue/drcd/templates.yaml @@ -0,0 +1,76 @@ +dataset: clue +subset: drcd +templates: + 2b2454d1-4375-4fb3-93a5-8c1e4ee605ea: !Template + answer_choices: null + id: 2b2454d1-4375-4fb3-93a5-8c1e4ee605ea + jinja: 'Answer the question using the given context. 
+ + Question: {{ question }} + + Context: {{ context }} + + Answer: ||| + + {{ answers[''text''][0] }}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - zh + metrics: + - Squad + original_task: true + name: answer_following_question + reference: '' + 41aebf75-a867-455b-a5dc-519ab83cf24f: !Template + answer_choices: null + id: 41aebf75-a867-455b-a5dc-519ab83cf24f + jinja: '{{ context }} + + The answer to {{ question }} is in the passage above. What is it? + + ||| + + {{ answers[''text''][0] }}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - zh + metrics: + - Squad + original_task: true + name: answer_in_the_passage + reference: '' + ac20087c-80a0-4965-8cab-d8cb6f90a555: !Template + answer_choices: null + id: ac20087c-80a0-4965-8cab-d8cb6f90a555 + jinja: 'Given this context "{{ context }}", generate a question that would return + the answer of "{{ answers[''text''][0] }}". + + ||| + + {{ question }} ' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - zh + metrics: + - ROUGE + original_task: false + name: generate_question + reference: '' + b2684f23-b191-4e6d-9dc5-12b1d7d4cf49: !Template + answer_choices: null + id: b2684f23-b191-4e6d-9dc5-12b1d7d4cf49 + jinja: "In an exam, you are asked {{ question }}, and you are tasked to find the\ + \ answer from the following passage. \n{{ context }}\nWhat's the answer?\n|||\n\ + {{ answers['text'][0] }}" + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - zh + metrics: + - Squad + original_task: true + name: in_an_exam + reference: '' diff --git a/promptsource/templates/clue/tnews/templates.yaml b/promptsource/templates/clue/tnews/templates.yaml new file mode 100644 index 000000000..685170cc5 --- /dev/null +++ b/promptsource/templates/clue/tnews/templates.yaml @@ -0,0 +1,62 @@ +dataset: clue +subset: tnews +templates: + 0c965fcd-d5e9-4e6a-b8ec-13253ed7bf4a: !Template + answer_choices: story ||| culture ||| entertainment ||| sports ||| finance ||| + real estate ||| car ||| education ||| tech ||| military ||| travel ||| world + news ||| stock ||| agriculture ||| game + id: 0c965fcd-d5e9-4e6a-b8ec-13253ed7bf4a + jinja: 'Classify the title "{{ sentence }}" into the following topics: + + - {{ answer_choices | join(''\n- '') }} + + Topic: ||| + + {{ answer_choices[label] }}' + metadata: !TemplateMetadata + choices_in_prompt: true + languages: + - zh + metrics: + - Accuracy + original_task: true + name: classify_title + reference: '' + 26e75138-7aa4-4b13-b2fa-7cd4ad5484b3: !Template + answer_choices: story ||| culture ||| entertainment ||| sports ||| finance ||| + real estate ||| car ||| education ||| tech ||| military ||| travel ||| world + news ||| stock ||| agriculture ||| game + id: 26e75138-7aa4-4b13-b2fa-7cd4ad5484b3 + jinja: "Given the topics of {{answer_choices[:-1] | join(', ') }}, and {{ answer_choices[-1]\ + \ }}, specify which of them best represents the following sentence:\n{{ sentence\ + \ }}\n\nBest: ||| \n{{ answer_choices[label] }}" + metadata: !TemplateMetadata + choices_in_prompt: true + languages: + - zh + metrics: + - Accuracy + original_task: true + name: best_represent + reference: '' + c242254c-bf5d-4efb-9dc3-51717bab7f78: !Template + answer_choices: story ||| culture ||| entertainment ||| sports ||| finance ||| + real estate ||| car ||| education ||| tech ||| military ||| travel ||| world + news ||| stock ||| agriculture ||| game + id: c242254c-bf5d-4efb-9dc3-51717bab7f78 + jinja: 'What topic does the following news title "{{ sentence }}" 
belong to? {{ + answer_choices[0] | capitalize }}, {{ answer_choices[1:-1] | join('', '') }}, + or {{ answer_choices[-1] }}? + + ||| + + {{ answer_choices[label] }}' + metadata: !TemplateMetadata + choices_in_prompt: true + languages: + - zh + metrics: + - Accuracy + original_task: true + name: what_title + reference: ''
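For reference, the templates added in this series can be exercised end-to-end with promptsource's DatasetTemplates loader. The snippet below is a minimal sketch, not part of the patch: it assumes the Hugging Face "clue"/"tnews" dataset (fields: sentence, label, idx) and the standard Template.apply API, and renders the "what_title" prompt added in PATCH 4/4.

from datasets import load_dataset
from promptsource.templates import DatasetTemplates

# Pull one validation example from the tnews subset of CLUE
# (fields: sentence, label, idx).
example = load_dataset("clue", "tnews", split="validation")[0]

# Load the prompt collection added by this patch and select a template by name.
tnews_prompts = DatasetTemplates("clue", "tnews")
prompt = tnews_prompts["what_title"]

# apply() renders the Jinja template against the example and splits the
# result on "|||" into the prompt text and the expected target.
input_text, target_text = prompt.apply(example)
print("INPUT: ", input_text)
print("TARGET:", target_text)

Because "what_title" declares answer_choices, the integer label indexes the |||-separated list, so the rendered target comes out as the topic name (e.g. "sports") rather than the raw class id.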