
Commit ade17e3 (parent: 522ac0b)
Commit message: For 2021 run

File tree: 6 files changed, +242 −64 lines changed

Project.toml

Lines changed: 2 additions & 1 deletion
@@ -1,7 +1,7 @@
 name = "NeidSolarScripts"
 uuid = "71cacfaf-3521-4e83-9e04-9e5e1461b034"
 authors = ["Eric Ford", "Shubham Kanodia", "Andrea Lin"]
-version = "0.1.2"
+version = "0.1.3"
 
 [deps]
 ArgParse = "c7e460c6-2fb9-53a9-8c5b-16f535851c63"
@@ -29,6 +29,7 @@ MultivariateStats = "6f286f6a-111f-5878-ab1e-185364afe411"
 NaNMath = "77ba4419-2d1f-58cd-9bb1-8ffee604a2e3"
 NeidArchive = "b2c48323-12ae-41a0-acdb-062d24b276bc"
 OrderedCollections = "bac558e1-5e72-5ebc-8fee-abe8a469f55d"
+PkgVersion = "eebad327-c553-4316-9ea0-9fa01ccd7688"
 Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80"
 Pluto = "c3e4b0f8-55cb-11ea-2926-15256bba5781"
 PlutoUI = "7f904dfe-b85e-4ff6-b463-dae2292396a8"
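
Note on the new PkgVersion dependency: it provides a way to look up a package's version at runtime, presumably so outputs can record which package versions produced them (the combine script later in this commit excludes report.versions.* columns, so versions are evidently being written into daily reports). A minimal sketch of the typical usage; the variable name is illustrative, not from this commit:

    import PkgVersion
    import NeidSolarScripts
    # PkgVersion.Version(mod) returns the VersionNumber from the package's
    # Project.toml, e.g. v"0.1.3" after this commit.
    ver = PkgVersion.Version(NeidSolarScripts)
    println("NeidSolarScripts version: ", ver)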

examples/calc_order_ccfs_using_continuum_1.1.jl

Lines changed: 33 additions & 12 deletions
@@ -69,9 +69,12 @@ println("# Parsing arguments...")
    add_arg_group!(s, "Line list parameters", :argg_line_list_param)
    @add_arg_table! s begin
       "--line_list_filename"
-         help = "Line list file"
+         help = "Line list filename (input)"
          arg_type = String
          #default = joinpath(pkgdir(NeidSolarScripts),"data","solar_line_list_espresso.csv")
+      "--line_list_output_filename"
+         help = "Line list filename (output)"
+         arg_type = String
       "--recompute_line_weights"
          help = "Force recalculation of SNR-based line weight factors."
          action = :store_true
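
For context, an ArgParse option declared with only an arg_type and no default parses to `nothing` when omitted, which is what the `!isnothing(args["line_list_output_filename"])` guard later in this diff relies on. A self-contained sketch (the file name is made up):

    using ArgParse
    s = ArgParseSettings()
    @add_arg_table! s begin
        "--line_list_output_filename"
            help = "Line list filename (output)"
            arg_type = String
    end
    # Parse an explicit argument vector for illustration; the script parses ARGS.
    args = parse_args(["--line_list_output_filename", "lines_out.csv"], s)
    @assert args["line_list_output_filename"] == "lines_out.csv"
    # When the flag is omitted, the entry is `nothing`:
    @assert parse_args(String[], s)["line_list_output_filename"] === nothing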
@@ -98,6 +101,9 @@ println("# Parsing arguments...")
          help = "Filename for anchor locations to use in computing continuum to normalize by."
          arg_type = String
          #default = "/home/eford/Code/RvSpectMLEcoSystem/NeidSolarScripts/data/neidMaster_HR_SmoothLampSED_20210101.fits"
+      "--anchors_filename_output"
+         help = "Filename to write anchor locations to use in computing continuum to normalize by."
+         arg_type = String
       "--smoothing_half_width"
          help = "Half width for window to use for smoothing prior to finding local maximum."
          arg_type = Int64
@@ -191,7 +197,8 @@ println("# Parsing arguments...")
       "--max_num_spectra_clean"
          help = "Maximum number of spectra to include in clean spectrum."
          arg_type = Int
-         default = 65 # based on 60 minutes of integration time from 55s exposures
+         default = 130 # based on 120 minutes of integration time from 55s exposures
+         #default = 65 # based on 60 minutes of integration time from 55s exposures
    end
 
    return parse_args(s)
@@ -326,8 +333,8 @@ if verbose println("# Reading manifest of files to process.") end
 if args["target"] == "Sun" || args["target"] == "Solar"
    @assert hasproperty(df_files,:alt_sun) # TODO: Compute if not available?
    @assert hasproperty(df_files,:Δfwhm²) # TODO: Compute if not available?
-   if !hasproperty(df_files,:solar_hour_angle)
-      df_files[!,"solar_hour_angle"] = SolarRotation.get_solar_hour_angle(df_files.bjd,obs=:WIYN)
+   if !hasproperty(df_files,:hour_angle)
+      df_files[!,"hour_angle"] = SolarRotation.get_solar_hour_angle(df_files.bjd,obs=:WIYN)
    end
    #if eltype(df_files[!,:order_snrs]) == String # TODO: Compute if not available?
 end
@@ -367,7 +374,7 @@ if verbose println("# Reading manifest of files to process.") end
    @filter( args["max_airmass"] == nothing || _.airmass <= args["max_airmass"] ) |>
    DataFrame
 df_files_use = df_files_use |>
-   @filter( args["max_solar_hour_angle"] == nothing || abs(_.solar_hour_angle) <= args["max_solar_hour_angle"] ) |>
+   @filter( args["max_solar_hour_angle"] == nothing || abs(_.hour_angle) <= args["max_solar_hour_angle"] ) |>
    DataFrame
 df_files_use = df_files_use |>
    @filter( args["start_time"] == nothing || Time(julian2datetime(_.bjd)) >= start_time ) |>
@@ -409,13 +416,15 @@ df_files_cleanest = df_files_use |>
    @filter( min_drp_minor_version <= Base.thisminor(VersionNumber(_.drpversion)) <= max_drp_minor_version ) |>
    #@filter( _.airmass <= 2.0 ) |>
    @filter( _.airmass <= args["max_airmass_clean"] ) |>
-   @filter( abs(_.solar_hour_angle) <= args["max_solar_hour_angle_clean"] ) |>
-   @orderby( abs(_.solar_hour_angle) ) |>
+   @filter( abs(_.hour_angle) <= args["max_solar_hour_angle_clean"] ) |>
+   @orderby( abs(_.hour_angle) ) |>
    @take(args["max_num_spectra_clean"] ) |>
    DataFrame
 println("# Found ", size(df_files_cleanest,1), " files considered clean.")
 #@assert size(df_files_cleanest,1) >= 1
 
+clean_obs_mask = map(fn->in(fn, df_files_cleanest.Filename),df_files_use.Filename)
+
 if verbose println(now()) end
 
 if @isdefined(sed_filename)
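
The new clean_obs_mask records, for each row of df_files_use, whether that file also made the "cleanest" cut, so downstream consumers can recover the clean subset from the saved manifest. A toy illustration of the semantics:

    using DataFrames
    df_files_use      = DataFrame(Filename = ["a.fits", "b.fits", "c.fits"])
    df_files_cleanest = DataFrame(Filename = ["b.fits"])
    clean_obs_mask = map(fn->in(fn, df_files_cleanest.Filename), df_files_use.Filename)
    @assert clean_obs_mask == [false, true, false]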
@@ -534,16 +543,20 @@ pipeline_plan = PipelinePlan()
    @assert isfile(args["anchors_filename"]) && filesize(args["anchors_filename"])>0
    anchors = load(args["anchors_filename"],"anchors")
 else
+   println("# Computing Continuum model.")
    if @isdefined sed
       @warn("DRP v1.1 now provides blaze in L2 file. This script has not been updated to use an explicit SED model.")
       (anchors, continuum, f_filtered) = Continuum.calc_continuum(spec.λ, mean_clean_flux_sed_normalized, mean_clean_var_sed_normalized; fwhm = args["fwhm_continuum"]*1000, ν = args["nu_continuum"],
          stretch_factor = args["stretch_factor"], merging_threshold = args["merging_threshold"], smoothing_half_width = args["smoothing_half_width"], min_R_factor = args["min_rollingpin_r"],
          orders_to_use = orders_to_use_for_continuum, verbose = false )
+      save(args["anchors_filename_output"], Dict("anchors" => anchors) )
    else
       (anchors, continuum, f_filtered) = Continuum.calc_continuum(spec.λ, mean_clean_flux, mean_clean_var; fwhm = args["fwhm_continuum"]*1000, ν = args["nu_continuum"],
          stretch_factor = args["stretch_factor"], merging_threshold = args["merging_threshold"], smoothing_half_width = args["smoothing_half_width"], min_R_factor = args["min_rollingpin_r"],
          orders_to_use = orders_to_use_for_continuum, verbose = false )
+      save(args["anchors_filename_output"], Dict("anchors" => anchors) )
    end
+   println("# Stored anchors used for continuum model.")
 end
 normalization_anchors_list = anchors
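
Writing the anchors to --anchors_filename_output mirrors the load path at the top of this block, so a later run can pass the saved file as --anchors_filename and skip the continuum fit. A sketch of the round-trip, assuming the JLD2/FileIO backend implied by the save/load calls above (the file name is made up):

    using FileIO, JLD2
    anchors = [5000.0, 5002.5, 5004.2]   # toy stand-in for the real anchor locations
    save("anchors_example.jld2", Dict("anchors" => anchors))
    @assert load("anchors_example.jld2", "anchors") == anchors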

@@ -575,7 +588,9 @@ line_width = line_width_50_default
 max_mask_scale_factor = max(mask_scale_factor, max(lsf_width,
    line_width/sqrt(8*log(2)))/default_ccf_mask_v_width(NEID2D())) # 4.0
 max_bc = RvSpectMLBase.max_bc
-#max_bc = RvSpectMLBase.max_bc_earth_rotation
+if args["target"] == "Sun" || args["target"] == "Solar"
+   max_bc = RvSpectMLBase.max_bc_solar
+end
 #max_orders = min_order(NEID2D()):max_order(NEID2D())
 #good_orders = orders_to_use_default(NEID2D())
 #orders_to_use = max_orders
@@ -589,11 +604,16 @@ line_width = line_width_50_default
 #orders_to_use = good_orders
 #order_list_timeseries = extract_orders(all_spectra,pipeline_plan, orders_to_use=orders_to_use, recalc=true )
 touch(line_list_filename)
-#line_list_filename = "G2.espresso.mas"
-linelist_for_ccf_fn_w_path = joinpath(pkgdir(EchelleCCFs),"data","masks",line_list_filename)
-line_list_espresso = prepare_line_list(linelist_for_ccf_fn_w_path, all_spectra, pipeline_plan, v_center_to_avoid_tellurics=ccf_mid_velocity,
+if (isfile(line_list_filename) && (filesize(line_list_filename)>0))
+   line_list_input_filename = line_list_filename
+else
+   line_list_input_filename = joinpath(pkgdir(EchelleCCFs),"data","masks","espresso+neid_mask_97_to_108.mas")
+end
+line_list_espresso = prepare_line_list(line_list_input_filename, all_spectra, pipeline_plan, v_center_to_avoid_tellurics=ccf_mid_velocity,
    Δv_to_avoid_tellurics = 2*max_bc+range_no_mask_change*line_width_50_default+max_mask_scale_factor*default_ccf_mask_v_width(NEID2D()), orders_to_use=#=orders_to_use=#56:108, recalc=true, verbose=true)
-#CSV.write(line_list_filename, line_list_espresso)
+if args["recompute_line_weights"] && !isnothing(args["line_list_output_filename"])
+   CSV.write(args["line_list_output_filename"], line_list_espresso)
+end
 end
 #file_hashes[line_list_filename] = bytes2hex(sha256(line_list_filename))
 file_hashes[line_list_filename] = bytes2hex(open(md5,line_list_filename))
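
For reference, the file-hash line above uses the MD5.jl idiom of applying md5 to an open stream; bytes2hex is from Base. A minimal sketch (the file and its contents are made up):

    using MD5
    write("example.mas", "5000.0 1.0\n")
    # open(f, filename) calls f on the opened IO and closes it afterwards.
    hash_hex = bytes2hex(open(md5, "example.mas"))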
@@ -637,6 +657,7 @@ println("# Saving results to ", daily_ccf_filename, ".")
    f["Δfwhm"] = Δfwhm
    f["orders_to_use"] = orders_to_use
    f["manifest"] = df_files_use
+   f["clean_obs_mask"] = clean_obs_mask
    f["calc_order_ccf_args"] = args
    f["ccf_line_list"] = line_list_espresso
 if (size(df_files_cleanest,1) >= 1) && any(.!iszero.(mean_clean_flux))

examples/combine_daily_reports_v1.0.jl

Lines changed: 4 additions & 3 deletions
@@ -109,13 +109,13 @@ function array_of_dict_to_dataframe( aod::AOD; first_cols::AbstractVector{String
       if !haskey(first(aod), k) || in(k, exclude_cols)
          continue
       end
-      df[!,k] = map(i->aod[i][k],1:size(aod,1))
+      df[!,k] = map(i->haskey(aod[i],k) ? aod[i][k] : missing,1:size(aod,1))
    end
    for k in sort!(collect(keys(first(aod))))
       if in(k, first_cols) || in(k, exclude_cols)
          continue
       end
-      df[!,k] = map(i->aod[i][k],1:size(aod,1))
+      df[!,k] = map(i->haskey(aod[i],k) ? aod[i][k] : missing,1:size(aod,1))
    end
    return df
 end
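
This change hardens array_of_dict_to_dataframe so that a day whose report lacks a key yields `missing` instead of throwing a KeyError. A toy illustration of the new column-building expression:

    aod = [ Dict("rv" => 1.0, "snr" => 250.0),
            Dict("rv" => 1.2) ]             # second day lacks "snr"
    col = map(i->haskey(aod[i],"snr") ? aod[i]["snr"] : missing, 1:size(aod,1))
    # col == [250.0, missing]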
@@ -145,7 +145,8 @@ resize!(daily,num_days_with_usable_obs)
 @info "# Making dataframe"
 daily_flat = flatten_dict.(daily)
 first_cols = ["obs_date.string", "obs_date.mean_bjd","num_rvs.good"]
-cols_to_exclude = ["report.hostname", "title", "report.input_md5", "report.input_file"]
+cols_to_exclude = ["report.hostname", "title", "report.input_md5", "report.input_file","report.date_run","report.input_ctime",
+   "report.versions.EchelleCCFs","report.versions.EchelleInstruments","report.versions.NeidSolarScripts","report.versions.RvSpectML","report.versions.RvSpectMLBase","report.versions.SunAsAStar"]
 df = array_of_dict_to_dataframe( daily_flat, first_cols=first_cols, exclude_cols=cols_to_exclude )
 
 @info "# Sorting dataframe"
