diff --git a/docs/conf_master.md b/docs/conf_master.md index ccb34e2..6ef4194 100644 --- a/docs/conf_master.md +++ b/docs/conf_master.md @@ -35,7 +35,7 @@ following message next time they want to perform a HIT from this group: * `allowed_max_hit_in_project:60`: Number of assignments that one worker can perform from this project. * `hit_base_payment:0.5`: Base payment for an accepted assignment from this HIT. This value will be used as information. * `quantity_hits_more_than: 30`: Defines the necessary hits required for quantity bonus. -* `quantity_bonus: 0.1`: The amount of the quantity bonus to be payed for each accepted assignment. +* `quantity_bonus: 0.1`: The amount of the quantity bonus to be paid for each accepted assignment. * `quality_top_percentage: 20`: Defines when quality bonus should be applied (in addition, participant should be eligible for quantity bonus). * `quality_bonus: 0.15`: the amount of the quality bonus per accepted assignment. @@ -57,7 +57,7 @@ following message next time they want to perform a HIT from this group: * `allowed_max_hit_in_project:60`: Number of assignments that one worker can perform from this project. * `hit_base_payment:0.5`: Base payment for an accepted assignment from this HIT. This value will be used as information. * `quantity_hits_more_than: 30`: Defines the necessary hits required for quantity bonus. -* `quantity_bonus: 0.1`: The amount of the quantity bonus to be payed for each accepted assignment. +* `quantity_bonus: 0.1`: The amount of the quantity bonus to be paid for each accepted assignment. * `quality_top_percentage: 20`: Defines when quality bonus should be applied (in addition, participant should be eligible for quantity bonus). * `quality_bonus: 0.15`: the amount of the quality bonus per accepted assignment. @@ -76,7 +76,7 @@ following message next time they want to perform a HIT from this group: * `allowed_max_hit_in_project:60`: Number of assignments that one worker can perform from this project. * `hit_base_payment:0.5`: Base payment for an accepted assignment from this HIT. This value will be used as information. * `quantity_hits_more_than: 30`: Defines when quantity bonus requirement. -* `quantity_bonus: 0.1`: the amount of the quantity bonus to be payed for each accepted assignment. +* `quantity_bonus: 0.1`: the amount of the quantity bonus to be paid for each accepted assignment. * `quality_top_percentage: 20`: Defines when quality bonus should be applied (in addition, participant should be eligible for quantity bonus). * `quality_bonus: 0.15`: the amount of the quality bonus per accepted assignment. diff --git a/docs/prep_p804.md b/docs/prep_p804.md index 0b83a77..cf91c99 100644 --- a/docs/prep_p804.md +++ b/docs/prep_p804.md @@ -17,7 +17,7 @@ column named `rating_clips` (see [rating_clips.csv](../src/test_inputs/rating_cl **Note on Reference Conditions** * It is strongly recommended to include Reference Conditions in your study to cover the entire range of MOS on all - dimesions. + dimensions. 1. Upload your **training clips** in a cloud server and create `training_gold_clips.csv` file which contains all URLs in a column named `training_clips` (see [training_gold_clips.csv](../src/test_inputs/training_gold_clips_p804.csv) as an example). @@ -35,7 +35,7 @@ any given answer for that dimension will be considered to be correct. **Hint**: Gold standard clips are used as a hidden quality control item in each session. 
It is expected that their answers are so obvious for all participants that they all give the `*_ans` rating (+/- 1 deviation is accepted) for all dimensions. It is recommended to use clips with excellent (answer 5) or very bad - (answer 1) quality. Also clips with extreme and oposite value for multiple dimensions works best (e.g. Coloration 5 and Discontinuty 1). + (answer 1) quality. Also clips with extreme and opposite value for multiple dimensions work best (e.g. Coloration 5 and Discontinuity 1). 1. Create trapping stimuli set for your dataset. diff --git a/docs/prep_p835.md b/docs/prep_p835.md index 3a15a3a..c93f8b8 100644 --- a/docs/prep_p835.md +++ b/docs/prep_p835.md @@ -17,7 +17,7 @@ column named `rating_clips` (see [rating_clips.csv](../src/test_inputs/rating_cl **Note on Reference Conditions** * It is strongly recommended to include Reference Conditions in your study to cover the entire range of MOS on all - three scales. Results of our studies showed that Reference Conditions based on teh ITU-T Rec. P.835 does not cover + three scales. Results of our studies showed that Reference Conditions based on the ITU-T Rec. P.835 do not cover the entire range of scales, rather the framework propose in ETSI 103 281 Annex D can cover the entire range. We recommend to use [3gpp_p501_FB](../p835_reference_conditions/3gpp_p501_FB) which is created base on the ETSI/3GPP framework. diff --git a/src/P808Template/ACR_template.html b/src/P808Template/ACR_template.html index c12d505..e72b42c 100644 --- a/src/P808Template/ACR_template.html +++ b/src/P808Template/ACR_template.html @@ -1231,7 +1231,7 @@

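For context on the bonus keys corrected above in docs/conf_master.md (`quantity_hits_more_than`, `quantity_bonus`, `quality_top_percentage`, `quality_bonus`), the following minimal Python sketch shows how they combine for one worker. The function and its inputs are hypothetical illustrations, not part of the toolkit's code.

```python
# Minimal sketch only: how the bonus keys documented in docs/conf_master.md
# (quantity_hits_more_than, quantity_bonus, quality_top_percentage, quality_bonus)
# combine for a single worker. Function and inputs are hypothetical, not toolkit APIs.

def estimate_bonus(accepted_hits: int,
                   in_top_quality_percentage: bool,
                   quantity_hits_more_than: int = 30,
                   quantity_bonus: float = 0.1,
                   quality_bonus: float = 0.15) -> float:
    """Total bonus for one worker under the documented rules."""
    total = 0.0
    if accepted_hits > quantity_hits_more_than:
        # the quantity bonus is paid per accepted assignment
        total += accepted_hits * quantity_bonus
        # the quality bonus applies only on top of quantity-bonus eligibility
        if in_top_quality_percentage:
            total += accepted_hits * quality_bonus
    return total

# e.g. 45 accepted assignments, worker within the top 20%:
# 45 * 0.1 + 45 * 0.15 = 11.25
print(estimate_bonus(45, True))
```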
diff --git a/src/P808Template/CCR_template.html b/src/P808Template/CCR_template.html index e1137a0..5bf1d10 100644 --- a/src/P808Template/CCR_template.html +++ b/src/P808Template/CCR_template.html @@ -477,7 +477,7 @@ } /* -Remove the answers for all pair-comparision questions in the setup section. +Remove the answers for all pair-comparison questions in the setup section. */ function makeAllCMPsUnChecked(){ // remove the listeners @@ -1455,7 +1455,7 @@

diff --git a/src/P808Template/DCR_template.html b/src/P808Template/DCR_template.html index 7f45bf4..429e587 100644 --- a/src/P808Template/DCR_template.html +++ b/src/P808Template/DCR_template.html @@ -1160,7 +1160,7 @@

diff --git a/src/P808Template/P808_multi.html b/src/P808Template/P808_multi.html index 1660376..7961bbc 100644 --- a/src/P808Template/P808_multi.html +++ b/src/P808Template/P808_multi.html @@ -494,7 +494,7 @@ } let gold_stat = getGoldQuestionStat(); - console.log("gold_stat:"+ gold_stat['failed'] +"warining:" + gold_stat['warning_showed'] ); + console.log("gold_stat:"+ gold_stat['failed'] +"warning:" + gold_stat['warning_showed'] ); let force_re_training = false; if (gold_stat['failed']>= config['goldClip']['n_fail_block']) { disableTheHIT(Hide_HIT_REASON.GOLD_FAILED); @@ -834,7 +834,7 @@ } function checkGoldQCorrect(given_ans, accepted_var, correct_hash, url) { - // return true if correct_hash is null or its lenght after trim is 0 + // return true if correct_hash is null or its length after trim is 0 if (!correct_hash || correct_hash.trim().length == 0) return true; @@ -1239,7 +1239,7 @@ /** - * Check is the givebn url belongs to a trapping questionin training section, If so returns its info, otherwise null- + * Check is the givebn url belongs to a trapping question in training section, If so returns its info, otherwise null- * @param {*} element **/ function getGoldQuestionInTrainingInfo(url){ @@ -1471,7 +1471,7 @@ return; stat = getGoldQuestionStat(); - console.log('current stat: faild '+stat['failed']+ ', sum: '+stat['sum']); + console.log('current stat: failed '+stat['failed']+ ', sum: '+stat['sum']); is_correct = true; // check the answers if ( !checkGoldQCorrect(Number(given_ans), 1, ans, goldClipAns["url"])){ @@ -1502,7 +1502,7 @@ } } saveGoldQuestionStat(stat); - console.log('new stat: faild '+stat['failed']+ ', sum: '+stat['sum']); + console.log('new stat: failed '+stat['failed']+ ', sum: '+stat['sum']); } /* @@ -1524,7 +1524,7 @@ return; stat = getGoldQuestionStat(); - //console.log('current stat: faild '+stat['failed']+ ', sum: '+stat['sum']); + //console.log('current stat: failed '+stat['failed']+ ', sum: '+stat['sum']); is_correct = true; // check the answers if ( !checkGoldQCorrect(Number(given_ans), 1, ans, goldClipAns["url"])){ @@ -1556,7 +1556,7 @@ } } saveGoldQuestionStat(stat); - //console.log('new stat: faild '+stat['failed']+ ', sum: '+stat['sum']); + //console.log('new stat: failed '+stat['failed']+ ', sum: '+stat['sum']); } /* @@ -1866,7 +1866,7 @@ - +


@@ -2161,7 +2161,7 @@ } /* Generate the training section - lod, rev, col, dis + loud, rev, col, dis */ function generate_training_section() { @@ -2599,7 +2599,7 @@ - +
@@ -3325,8 +3325,8 @@

Scales and examples:

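Several of the template fixes above touch the gold-question check (`checkGoldQCorrect`). As a rough illustration of the rule it enforces — the +/- 1 deviation described in docs/prep_p804.md — here is a minimal Python sketch; the function name and structure below are illustrative, not the template's actual JavaScript.

```python
# Minimal sketch only: the acceptance rule behind gold/trapping questions
# (docs/prep_p804.md: the expected `*_ans` rating with a +/- 1 deviation is
# accepted; the templates implement this in JavaScript as checkGoldQCorrect).
# Names below are illustrative, not the template's API.

def gold_answer_is_correct(given_ans, expected_ans, accepted_var=1):
    """Accept when no expected value is set, or when the given rating lies
    within `accepted_var` of the expected rating."""
    if expected_ans is None or str(expected_ans).strip() == "":
        # no expected answer recorded for this dimension -> treated as correct
        return True
    return abs(float(given_ans) - float(expected_ans)) <= accepted_var

print(gold_answer_is_correct(4, 5))  # True: within the +/- 1 deviation
print(gold_answer_is_correct(2, 5))  # False: rejected
```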
diff --git a/src/P808Template/P831_DCR_template.html b/src/P808Template/P831_DCR_template.html index 6644869..aa63935 100644 --- a/src/P808Template/P831_DCR_template.html +++ b/src/P808Template/P831_DCR_template.html @@ -1090,7 +1090,7 @@

diff --git a/src/P808Template/P835_personalized_template3.html b/src/P808Template/P835_personalized_template3.html index 1a18006..ce25068 100644 --- a/src/P808Template/P835_personalized_template3.html +++ b/src/P808Template/P835_personalized_template3.html @@ -320,8 +320,8 @@ } let gold_stat = getGoldQuestionStat(); - console.log("gold_stat:"+ gold_stat['failed'] +"warining:" + gold_stat['warning_showed'] ); - cookie_debug_status = cookie_debug_status + "gold_stat"+ gold_stat['failed'] +",warining:" + gold_stat['warning_showed'] + "*"; + console.log("gold_stat:"+ gold_stat['failed'] +"warning:" + gold_stat['warning_showed'] ); + cookie_debug_status = cookie_debug_status + "gold_stat"+ gold_stat['failed'] +",warning:" + gold_stat['warning_showed'] + "*"; let force_re_training = false; if (gold_stat['failed']>= config['goldClip']['n_fail_block']) { disableTheHIT(Hide_HIT_REASON.GOLD_FAILED); @@ -954,7 +954,7 @@ } /** - * Check is the givebn url belongs to a trapping questionin training section, If so returns its info, otherwise null- + * Check is the givebn url belongs to a trapping question in training section, If so returns its info, otherwise null- * @param {*} element **/ function getGoldQuestionInTrainingInfo(url){ @@ -1746,7 +1746,7 @@ - +
@@ -2436,13 +2436,13 @@

Important examples:

data-src="https://audiosamplesp808.blob.core.windows.net/p808-assets/clips/personalized_p835/example2.wav"> 

-
Example 3:
The speech signal of target speaker is completly removed from judgment segment (very distorted, score1 ). Although in judgment segment there is no background noise, but
+
Example 3:
The speech signal of target speaker is completely removed from judgment segment (very distorted, score 1). Although in judgment segment there is no background noise, but voice of another person. It is considered as a very intrusive background (score 1). The overall quality is Bad (score 1) as the system failed to recognize the target speaker and filter her out in the judgement segment.
-
Example 4:
The speech signal of target speaker is completly removed from judgment segment (very distorted, score1 ). The judgment segment doesnot contain any voice but slightly noticeable (score 4) background noise. The overall quality is Bad (score 1) as the system completly removed target speaker from the judgement segment.
+
Example 4:
The speech signal of target speaker is completely removed from judgment segment (very distorted, score 1). The judgment segment does not contain any voice but slightly noticeable (score 4) background noise. The overall quality is Bad (score 1) as the system completely removed target speaker from the judgement segment.

@@ -2601,7 +2601,7 @@

Well done!

You performed the maximum number of HITs that you were allo
@@ -1770,7 +1770,7 @@

The training section is identical to the Rating section, only with specific audio samples to familiarize you with the rating tasks:

-
In this experiment you will be rating the quality of sound samples involving speech in background noise. Each trial will include 1 audio sample which contains one or mroe sentences in a noisy background. Within each trial you will give three ratings asked in three questions all regarding to the same audio sample.
+
In this experiment you will be rating the quality of sound samples involving speech in background noise. Each trial will include 1 audio sample which contains one or more sentences in a noisy background. Within each trial you will give three ratings, asked in three questions, all regarding the same audio sample.

You will be instructed to provide following ratings in each trial:

  • attend only to the speech signal and rate how distorted the speech signal sounds to you.
@@ -1822,7 +1822,7 @@

    Ratings

-
In this experiment you will be rating the quality of sound samples involving speech in background noise. Each trial will include 1 audio sample which contains one or mroe sentences in a noisy background. Within each trial you will give three ratings asked in three questions all regarding to the same audio sample.
+
In this experiment you will be rating the quality of sound samples involving speech in background noise. Each trial will include 1 audio sample which contains one or more sentences in a noisy background. Within each trial you will give three ratings, asked in three questions, all regarding the same audio sample.

You will be instructed to provide following ratings in each trial:

  • attend only to the speech signal and rate how distorted the speech signal sounds to you.
diff --git a/src/P808Template/P835_template_one_audio.html index 2586cda..5e9fc49 100644 --- a/src/P808Template/P835_template_one_audio.html +++ b/src/P808Template/P835_template_one_audio.html @@ -1301,7 +1301,7 @@

@@ -1553,7 +1553,7 @@

The training section is identical to the Rating section, only with specific audio samples to familiarize you with the rating tasks:

-
In this experiment you will be rating the quality of sound samples involving speech in background noise. Each trial will include 1 audio sample which contains one or mroe sentences in a noisy background. Within each trial you will give three ratings asked in three questions all regarding to the same audio sample.
+
In this experiment you will be rating the quality of sound samples involving speech in background noise. Each trial will include 1 audio sample which contains one or more sentences in a noisy background. Within each trial you will give three ratings, asked in three questions, all regarding the same audio sample.

You will be instructed to provide following ratings in each trial:

  • attend only to the speech signal and rate how distorted the speech signal sounds to you.
@@ -1605,7 +1605,7 @@

    Ratings

-
In this experiment you will be rating the quality of sound samples involving speech in background noise. Each trial will include 1 audio sample which contains one or mroe sentences in a noisy background. Within each trial you will give three ratings asked in three questions all regarding to the same audio sample.
+
In this experiment you will be rating the quality of sound samples involving speech in background noise. Each trial will include 1 audio sample which contains one or more sentences in a noisy background. Within each trial you will give three ratings, asked in three questions, all regarding the same audio sample.

You will be instructed to provide following ratings in each trial:

  • attend only to the speech signal and rate how distorted the speech signal sounds to you.
diff --git a/src/P808Template/Qualification.html index 0f46e28..9c76701 100644 --- a/src/P808Template/Qualification.html +++ b/src/P808Template/Qualification.html @@ -340,7 +340,7 @@

diff --git a/src/P808Template/README.md b/src/P808Template/README.md index d5521f7..b5fdea7 100644 --- a/src/P808Template/README.md +++ b/src/P808Template/README.md @@ -25,7 +25,7 @@ The qualification will be assigned when a user #### Setup (every X minutes) Contains 6 questions: 1) To adjust the listening level, 2) A short math exercise with digits panning between left and -right in stereo to proove usage of two-eared headphones. 3-6) Environment Test in form of pair comparision test. Stimuli +right in stereo to prove usage of two-eared headphones. 3-6) Environment Test in form of pair comparison test. Stimuli presented here are carefully selected, to represent finest Just Noticeable Difference in Quality recognizable by normal participants in a laboratory session. It is expected with a proper setting, a crowd worker be able to answer at least 2/4 questions correctly. diff --git a/src/P808Template/bw_check.html b/src/P808Template/bw_check.html index 57d869d..aceecb6 100644 --- a/src/P808Template/bw_check.html +++ b/src/P808Template/bw_check.html @@ -308,7 +308,7 @@ } let gold_stat = getGoldQuestionStat(); - console.log("gold_stat:"+ gold_stat['failed'] +"warining:" + gold_stat['warning_showed'] ); + console.log("gold_stat:"+ gold_stat['failed'] +"warning:" + gold_stat['warning_showed'] ); let force_re_training = false; if (gold_stat['failed']>= config['goldClip']['n_fail_block']) { disableTheHIT(Hide_HIT_REASON.GOLD_FAILED); @@ -926,7 +926,7 @@ } /** - * Check is the givebn url belongs to a trapping questionin training section, If so returns its info, otherwise null- + * Check is the givebn url belongs to a trapping question in training section, If so returns its info, otherwise null- * @param {*} element **/ function getGoldQuestionInTrainingInfo(url){ @@ -1724,11 +1724,11 @@ "https://audiosamplesp808.blob.core.windows.net/p808-assets/clips/bw-test/g5_48_n.wav":'nn', } bw_test_data_wrong_msg = { - "https://audiosamplesp808.blob.core.windows.net/p808-assets/clips/bw-test/g1_48_n.wav":'[3.5-24KHz - Basic faild]', - "https://audiosamplesp808.blob.core.windows.net/p808-assets/clips/bw-test/g2_48_n.wav":'[+8Khz (SWB) faild]', - "https://audiosamplesp808.blob.core.windows.net/p808-assets/clips/bw-test/g3_48_n.wav":'[+15Khz FB faild]', - "https://audiosamplesp808.blob.core.windows.net/p808-assets/clips/bw-test/g4_48_n.wav":'[trapping faild]', - "https://audiosamplesp808.blob.core.windows.net/p808-assets/clips/bw-test/g5_48_n.wav":'[trapping faild]', + "https://audiosamplesp808.blob.core.windows.net/p808-assets/clips/bw-test/g1_48_n.wav":'[3.5-24KHz - Basic failed]', + "https://audiosamplesp808.blob.core.windows.net/p808-assets/clips/bw-test/g2_48_n.wav":'[+8Khz (SWB) failed]', + "https://audiosamplesp808.blob.core.windows.net/p808-assets/clips/bw-test/g3_48_n.wav":'[+15Khz FB failed]', + "https://audiosamplesp808.blob.core.windows.net/p808-assets/clips/bw-test/g4_48_n.wav":'[trapping failed]', + "https://audiosamplesp808.blob.core.windows.net/p808-assets/clips/bw-test/g5_48_n.wav":'[trapping failed]', } bw_v2_test_data ={ @@ -1740,11 +1740,11 @@ } bw_v2_test_data_wrong_msg ={ - "comb_bw1":'[3.5-24KHz - Basic faild]', - "comb_bw2":'[+8Khz (SWB) faild]', - "comb_bw3":'[+15Khz FB faild]', - "comb_bw4": '[trapping faild]', - "comb_bw5":'[trapping faild]' + "comb_bw1":'[3.5-24KHz - Basic failed]', + "comb_bw2":'[+8Khz (SWB) failed]', + "comb_bw3":'[+15Khz FB failed]', + "comb_bw4": '[trapping failed]', + "comb_bw5":'[trapping failed]' } @@ -1979,7 +1979,7 @@

Version 2

@@ -2415,13 +2415,13 @@

Important examples:

data-src="https://audiosamplesp808.blob.core.windows.net/p808-assets/clips/personalized_p835/example2.wav"> 

-
Example 3:
The speech signal of target speaker is completly removed from judgment segment (very distorted, score1 ). Although in judgment segment there is no background noise, but
+
Example 3:
The speech signal of target speaker is completely removed from judgment segment (very distorted, score 1). Although in judgment segment there is no background noise, but voice of another person. It is considered as a very intrusive background (score 1). The overall quality is Bad (score 1) as the system failed to recognize the target speaker and filter her out in the judgement segment.
-
Example 4:
The speech signal of target speaker is completly removed from judgment segment (very distorted, score1 ). The judgment segment doesnot contain any voice but slightly noticeable (score 4) background noise. The overall quality is Bad (score 1) as the system completly removed target speaker from the judgement segment.
+
Example 4:
The speech signal of target speaker is completely removed from judgment segment (very distorted, score 1). The judgment segment does not contain any voice but slightly noticeable (score 4) background noise. The overall quality is Bad (score 1) as the system completely removed target speaker from the judgement segment.

@@ -2580,7 +2580,7 @@

Well done!

You performed the maximum number of HITs that you were allo
diff --git a/src/P808Template/echo_impairment_test_template.html b/src/P808Template/echo_impairment_test_template.html index 5e4e6f5..779bc21 100644 --- a/src/P808Template/echo_impairment_test_template.html +++ b/src/P808Template/echo_impairment_test_template.html @@ -1384,7 +1384,7 @@

diff --git a/src/assets_master_script/acr_result_parser_template.cfg b/src/assets_master_script/acr_result_parser_template.cfg index ce601aa..e2ca537 100644 --- a/src/assets_master_script/acr_result_parser_template.cfg +++ b/src/assets_master_script/acr_result_parser_template.cfg @@ -32,7 +32,7 @@ variance: 1 [acceptance_criteria] all_audio_played_equal: 1 -# bandwidth controll range: "NB-WB", "SWB", "FB" +# bandwidth control range: "NB-WB", "SWB", "FB" bw_min: {{cfg.bw_min}}, bw_max: {{cfg.bw_max}}, # number of correct answers to the math questions should be bigger and equal to diff --git a/src/assets_master_script/dcr_ccr_result_parser_template.cfg b/src/assets_master_script/dcr_ccr_result_parser_template.cfg index c8a3cf1..aa8cbd6 100644 --- a/src/assets_master_script/dcr_ccr_result_parser_template.cfg +++ b/src/assets_master_script/dcr_ccr_result_parser_template.cfg @@ -24,7 +24,7 @@ url_found_in: input.tp [acceptance_criteria] all_audio_played_equal: 1 -# bandwidth controll range: "NB-WB", "SWB", "FB" +# bandwidth control range: "NB-WB", "SWB", "FB" bw_min: {{cfg.bw_min}}, bw_max: {{cfg.bw_max}}, # number of correct answers to the math questions should be bigger and equal to diff --git a/src/assets_master_script/p804_result_parser_template.cfg b/src/assets_master_script/p804_result_parser_template.cfg index a02191c..0383955 100644 --- a/src/assets_master_script/p804_result_parser_template.cfg +++ b/src/assets_master_script/p804_result_parser_template.cfg @@ -32,7 +32,7 @@ url2_found_in: input.gold_url_2 [acceptance_criteria] all_audio_played_equal: 1 -# bandwidth controll range: "NB-WB", "SWB", "FB" +# bandwidth control range: "NB-WB", "SWB", "FB" bw_min: {{cfg.bw_min}}, bw_max: {{cfg.bw_max}}, # number of correct answers to the math questions should be bigger and equal to diff --git a/src/configurations/master.cfg b/src/configurations/master.cfg index 005c3ae..55f1000 100644 --- a/src/configurations/master.cfg +++ b/src/configurations/master.cfg @@ -132,7 +132,7 @@ Container:p808-assets Path:/clips/sample_jnd/ [RatingClips] -# Comma seperated config keys for the various store configurations holding the rating clips +# Comma separated config keys for the various store configurations holding the rating clips # Ex: RatingClipsConfigurations:store1,store2 RatingClipsConfigurations: diff --git a/src/create_input.py b/src/create_input.py index ac9ee88..4729dda 100644 --- a/src/create_input.py +++ b/src/create_input.py @@ -385,7 +385,7 @@ def create_input_for_acr(cfg, df, output_path, method): tmp = df_small[['gold_url', 'gold_ovrl_ans', 'gold_sig_ans', 'gold_noise_ans', 'gold_col_ans', 'gold_loud_ans', 'gold_disc_ans', 'gold_reverb_ans']].copy() tmp = tmp.dropna(subset=['gold_url']) tmp = tmp.sample(frac=1, ignore_index=True) - # get dataframe lenght + # get dataframe length size = len(tmp) g_clips = tmp.copy() for i in range (1, (n_gold_clips//size)+1): diff --git a/src/environment test/README.md b/src/environment test/README.md index ed60760..29e2467 100644 --- a/src/environment test/README.md +++ b/src/environment test/README.md @@ -7,7 +7,7 @@ It is an implementation of the Adaptive Staircase Psychoacoustics method (3AFC, See [4] for full description of the method and its validation. ## Setup -Speech files should be located in `assets/jnd_noise`. File names should be formated like `[SNR]S_FILE_NAME.wav` with SNR +Speech files should be located in `assets/jnd_noise`. File names should be formatted like `[SNR]S_FILE_NAME.wav` with SNR ranges from 30 to 50 dB. 
Accordingly update the `assets/js/env_test_main.js`. The speech files should be degraded with same noise type (e.g. white-noise). @@ -20,7 +20,7 @@ Result shows the SNR level that the subject can successfully recognize its diffe ## References -[1]. Levit t , H. (1992).Adaptive procedures for hearing aid prescription and other audiologic applications. Journal of the American Academy of Audiology, 3, 119-131. +[1]. Levitt, H. (1992). Adaptive procedures for hearing aid prescription and other audiologic applications. Journal of the American Academy of Audiology, 3, 119-131. [2]. [ITU-T Recommendation P. 501](https://www.itu.int/rec/T-REC-P.501-201703-I/en): P.501 : Test signals for use in telephonometry, International Telecommunication Union, Geneva, 2017. diff --git a/src/environment test/assets/js/env_test_main.js b/src/environment test/assets/js/env_test_main.js index b4ef6a8..867a536 100644 --- a/src/environment test/assets/js/env_test_main.js +++ b/src/environment test/assets/js/env_test_main.js @@ -194,7 +194,7 @@ function start(){ function addJNDQuestion(n,snrLevel){ pick_is_added = false ; - var tempelate='
Sample A
Sample B
'; + var template='
Sample A
Sample B
'; a = snrLevel; b = config.snrEnd; @@ -209,7 +209,7 @@ function addJNDQuestion(n,snrLevel){ f1=fileNames[index].f(fileName.f(a)); f2=fileNames[index].f(fileName.f(b));; - text=tempelate.f(n,f1,f2,a,b); + text=template.f(n,f1,f2,a,b); console.log("Question "+n+", : A: "+a+", B: "+b); $("#cmp-body").append(text); @@ -220,9 +220,9 @@ function addJNDQuestion(n,snrLevel){ },500); } -/* -Called when user submit an answer for a pair comparision by clicking on "Next" -*/ +/* +Called when user submit an answer for a pair comparison by clicking on "Next" +*/ function submitAnsJnd(qNum,aSNR,bSNR){ if (!document.querySelector('input[name="cmp'+qNum+'"]:checked')){ alert("Please select your answer."); diff --git a/src/hearing test/README.md b/src/hearing test/README.md index c5c4c34..fa3cf84 100644 --- a/src/hearing test/README.md +++ b/src/hearing test/README.md @@ -1,9 +1,9 @@ # Hearing Test This is an implementation of digit-triplet test [1]. -It estimated a SNR level that the user in current environmental setting can recognize three spoken digits in present of background noise. -Subject should listent to several audio clips and each time type in which number is spoken. To estimnate the -It is an implementation of the Adaptive Staircase Psychoacoustics method (3AFC, 2 down- 1 up) as proposed by Levit [2]. +It estimates an SNR level that the user in the current environmental setting can recognize three spoken digits in the presence of background noise. +Subject should listen to several audio clips and each time type in which number is spoken. To estimate the +It is an implementation of the Adaptive Staircase Psychoacoustics method (3AFC, 2 down- 1 up) as proposed by Levitt [2]. ## Setup @@ -22,7 +22,7 @@ Result shows the SNR level that the subject can successfully recognize the three ## References [1]. Smits, Cas, Theo S. Kapteyn, and Tammo Houtgast. "Development and validation of an automatic speech-in-noise screening test by telephone." International journal of audiology 43.1 (2004): 15-28. -[2]. Levit t , H. (1992).Adaptive procedures for hearing aid prescription and other audiologic applications. Journal of +[2]. Levitt, H. (1992). Adaptive procedures for hearing aid prescription and other audiologic applications. Journal of the American Academy of Audiology, 3, 119-131. diff --git a/src/hearing test/assets/js/hearing_test.js b/src/hearing test/assets/js/hearing_test.js index e97876a..3164c34 100644 --- a/src/hearing test/assets/js/hearing_test.js +++ b/src/hearing test/assets/js/hearing_test.js @@ -208,7 +208,7 @@ var last_num=""; function addJNDQuestion(n,snrLevel){ pick_is_added = false ; // 0: question Number, 1: clip, 2: correct ans - var tempelate='
'; + var template='
'; a = snrLevel; b = config.snrEnd; @@ -221,7 +221,7 @@ function addJNDQuestion(n,snrLevel){ last_num = num; f=fileName.f(snrLevel,num); - text = tempelate.f(n,f,snrLevel, num); + text = template.f(n,f,snrLevel, num); console.log("Question "+n+", : num: "+num, 'f:'+f); $("#cmp-body").append(text); @@ -248,9 +248,9 @@ function addJNDQuestion(n,snrLevel){ } -/* -Called when user submit an answer for a pair comparision by clicking on "Next" -*/ +/* +Called when user submits an answer for a pair comparison by clicking on "Next" +*/ function submitAnsJnd(qNum,snr, correct_num){ if (!document.querySelector('input[name="num_'+qNum+'"]').value){ alert("Please enter the number you heard."); diff --git a/src/master_script.py b/src/master_script.py index 4478aaf..6b90d06 100644 --- a/src/master_script.py +++ b/src/master_script.py @@ -176,7 +176,7 @@ async def create_hit_app_ccr_dcr(cfg, template_path, out_path, training_path, cf rating_urls = [] n_clips = int(cfg_g['number_of_clips_per_session']) n_traps = int(cfg_g['number_of_trapping_per_session']) - # 'dummy':'dummy' is added becuase of current bug in AMT for replacing variable names. See issue #6 + # 'dummy':'dummy' is added because of current bug in AMT for replacing variable names. See issue #6 for i in range(0, n_clips): rating_urls.append({"ref": f"${{Q{i}_R}}", "processed": f"${{Q{i}_P}}", 'dummy': 'dummy'}) diff --git a/src/mturk_utils.py b/src/mturk_utils.py index 0ba5102..f41baf8 100644 --- a/src/mturk_utils.py +++ b/src/mturk_utils.py @@ -281,9 +281,9 @@ def approve_reject_assignments(client, assignment_path, approve): print(f'Processed {line_count} assignments - sent {success} calls was successful and {failed} calls failed.') -def get_assignment_review_policy(cfg): - """ - Create an Assigment Review Policy as explained by MTurke +def get_assignment_review_policy(cfg): + """ + Create an Assignment Review Policy as explained by MTurk :param cfg: configuration object :return: """ diff --git a/src/process_qualification_ans.py b/src/process_qualification_ans.py index ca5f399..cb67c64 100644 --- a/src/process_qualification_ans.py +++ b/src/process_qualification_ans.py @@ -67,9 +67,9 @@ def check_last_audio_test(ans): return ans in config['6_audio_test'] -def check_working_experities(ans): +def check_working_expertise(ans): """ - user should not have working experince in this area + user should not have working experience in this area """ return ans in config['7_working_area'] @@ -138,7 +138,7 @@ def evaluate_asnwers(answe_path): if not r: report['6_audio_test'] += 1 judge['6_'] = r if '7_working_area' in fields_to_consider: - r = check_working_experities(row['Answer.7_working_area']) + r = check_working_expertise(row['Answer.7_working_area']) accept = accept and r if not r: report['7_working_area'] += 1 judge['7_'] = r diff --git a/src/result_parser.py b/src/result_parser.py index e9e0112..5b21ef5 100644 --- a/src/result_parser.py +++ b/src/result_parser.py @@ -141,7 +141,7 @@ def check_audio_played(row, method): """ check if all audios for questions played until the end :param row: - :param method: acr,dcr, ot ccr + :param method: acr, dcr, or ccr :return: """ question_played = 0 @@ -684,7 +684,7 @@ def check_qualification_answer(row): def check_a_cmp(file_a, file_b, ans, audio_a_played, audio_b_played): """ - check if pair comparision answered correctly + check if pair comparison answered correctly :param file_a: :param file_b: :param ans: @@ -721,7 +721,7 @@ def data_cleaning(filename, method): reader = csv.DictReader(csvfile) # lowercase 
the fieldnames reader.fieldnames = [field.strip().lower().replace('_slide','') for field in reader.fieldnames] - # ----------- pair comparision + # ----------- pair comparison # Input.CMP1_A Input.CMP1_B Input.CMP2_A Input.CMP2_B Input.CMP3_A Input.CMP3_B Input.CMP4_A Input.CMP4_B # Answer.cmp1 Answer.cmp2 Answer.cmp3 Answer.cmp4 # Answer.audio_n_play_CMP1_A Answer.audio_n_play_CMP1_B Answer.audio_n_play_CMP2_A Answer.audio_n_play_CMP2_B @@ -776,7 +776,7 @@ def data_cleaning(filename, method): # step2. check math d['correct_math'] = 1 if check_math(row['input.math'], row['answer.math'], row['answer.audio_n_play_math1']) else 0 - # step3. check pair comparision + # step3. check pair comparison for i in range(1, 5): if check_a_cmp(row[f'input.cmp{i}_a'], row[f'input.cmp{i}_b'], row[f'answer.cmp{i}'], row[f'answer.audio_n_play_cmp{i}_a'], @@ -799,7 +799,7 @@ def data_cleaning(filename, method): if method =="p804" and 'url_2' in rec: gold_question_wrong = (1 if rec['wrong']>0 else 0)+ (2 if rec['wrong_2']>0 else 0) d["gold_question_wrong"] = gold_question_wrong - # remove the commnet to only reject on first gold question + # remove the comment to only reject on first gold question #if rec['wrong']==0: # d["correct_gold_question"] = 1 if 'url_2' in rec: @@ -870,12 +870,12 @@ def data_cleaning(filename, method): # new from p910 def evaluate_rater_performance(data, use_sessions, reject_on_failure=False): """ - Evaluate the workers performance based on the following criteria in cofnig file: + Evaluate the workers performance based on the following criteria in config file: rater_min_acceptance_rate_current_test rater_min_accepted_hits_current_test :param data: :param use_sessions: - :param reject_on_failure: if True, check the criteria on [acceptance_criteria] otehrwise check it in the + :param reject_on_failure: if True, check the criteria on [acceptance_criteria] otherwise check it in the [accept_and_use] section of config file. 
:return: """ @@ -912,7 +912,7 @@ def evaluate_rater_performance(data, use_sessions, reject_on_failure=False): grouped_rej = grouped[(grouped.acceptance_rate < rater_min_acceptance_rate_current_test) | (grouped.used_count < rater_min_accepted_hits_current_test)] n_submission_removed_only_for_performance = grouped_rej['used_count'].sum() - print(f'{n_submission_removed_only_for_performance} sessions are removed only becuase of performance criteria ({section}).') + print(f'{n_submission_removed_only_for_performance} sessions are removed only because of performance criteria ({section}).') workers_list_to_remove = list(grouped_rej['worker_id']) result = [] @@ -1002,7 +1002,7 @@ def save_approve_rejected_ones_for_gui(data, path): small_df.to_csv(path, index=False) -def save_approved_ones(data, path, failurs_res=[]): +def save_approved_ones(data, path, failures_res=[]): """ save approved results in the given path :param data: @@ -1014,10 +1014,10 @@ def save_approved_ones(data, path, failurs_res=[]): c_accepted = df.shape[0] df = df[df.status == 'Submitted'] if df.shape[0] == c_accepted: - print(f" {c_accepted} answers are accepted; failurs: {list(collections.Counter(failurs_res).items())}") + print(f" {c_accepted} answers are accepted; failures: {list(collections.Counter(failures_res).items())}") else: print( - f" overall {c_accepted} answers are accepted, from them {df.shape[0]} were in submitted status; failurs: {list(collections.Counter(failurs_res).items())}" + f" overall {c_accepted} answers are accepted, from them {df.shape[0]} were in submitted status; failures: {list(collections.Counter(failures_res).items())}" ) small_df = df[["assignment"]].copy() small_df.rename(columns={"assignment": "assignmentId"}, inplace=True) @@ -1109,8 +1109,8 @@ def calc_quantity_bonuses(answer_list, conf, path): eligible_all = list(grouped['worker_id']) new_eligible = list(set(eligible_all)-set(old_eligible)) - # the bonus should be given to the tasks that are either automatically accepted or submited. The one with status - # accepted should have been already payed. + # the bonus should be given to the tasks that are either automatically accepted or submitted. The one with status + # accepted should have been already paid. filtered_answers = filter_answer_by_status_and_workers(df, eligible_all, new_eligible, conf) # could be also accept_and_use grouped = filtered_answers.groupby(['worker_id'], as_index=False)['accept'].sum() @@ -1150,7 +1150,7 @@ def calc_inter_rater_reliability(answer_list, overall_mos, test_method, question aggregate_on = 'condition_name' else: aggregate_on = 'file_url' - # if it is per file, make sure to consider clips tha has at least 3 votes + # if it is per file, make sure to consider clips that have at least 3 votes tmp = tmp[tmp['n'] >= 3] c_df = tmp[[aggregate_on, mos_name]].copy() c_df.rename(columns={mos_name: 'mos'}, inplace=True) @@ -1374,7 +1374,7 @@ def dict_value_to_key(d, value): def transform(test_method, sessions, agrregate_on_condition, is_worker_specific): """ Given the valid sessions from answer.csv, group votes per files, and per conditions. - Assumption: file name conatins the condition name/number, which can be extracted using "condition_patten" . + Assumption: file name contains the condition name/number, which can be extracted using "condition_pattern" . 
:param sessions: :return: """ @@ -1650,7 +1650,7 @@ def calc_correlation(cs, lab, spearman=False): def number_of_uniqe_workers(answers): """ - return numbe rof unique workers + return number of unique workers :param answers: :return: """
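As a rough illustration of the rater-performance screen referenced in the `evaluate_rater_performance` docstring above (`rater_min_acceptance_rate_current_test`, `rater_min_accepted_hits_current_test`), here is a hedged pandas sketch; the toy dataframe and the way the rate is derived are assumptions, not result_parser.py's actual code.

```python
# Minimal sketch only: the per-worker screen described in the
# evaluate_rater_performance docstring. The toy dataframe and the way the
# acceptance rate is derived here are assumptions, not the parser's code.
import pandas as pd

answers = pd.DataFrame({
    "worker_id":      ["w1", "w1", "w1", "w2", "w2"],
    "accept":         [1, 1, 0, 1, 0],   # passed the acceptance criteria
    "accept_and_use": [1, 1, 0, 0, 0],   # additionally used in the analysis
})

rater_min_acceptance_rate_current_test = 0.6
rater_min_accepted_hits_current_test = 2

grouped = answers.groupby("worker_id", as_index=False).agg(
    used_count=("accept_and_use", "sum"),
    submitted=("accept", "count"),
)
grouped["acceptance_rate"] = grouped["used_count"] / grouped["submitted"]

# workers failing either threshold have all of their sessions removed
grouped_rej = grouped[
    (grouped.acceptance_rate < rater_min_acceptance_rate_current_test)
    | (grouped.used_count < rater_min_accepted_hits_current_test)
]
print(list(grouped_rej["worker_id"]))  # ['w2'] in this toy example
```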