
Commit dbcb3f1

mobs rl submission
1 parent d0ea9eb commit dbcb3f1

2 files changed: +4748 lines added, 0 lines deleted
Lines changed: 55 additions & 0 deletions
@@ -0,0 +1,55 @@
team_name: "MOBS Lab at Northeastern University"
team_abbr: "MOBS"
model_name: "GLEAM Flu Reinforcement Learning Forecasting Model"
model_abbr: "GLEAM_RL_FLUH"
model_version: "1.0"
model_contributors: [
  {
    "name": "Alessandro Vespignani",
    "affiliation": "MOBS Lab, Northeastern University",
    "email": "alexves@gmail.com"
  },
  {
    "name": "Jessica T. Davis",
    "affiliation": "MOBS Lab, Northeastern University",
    "email": "jes.davis@northeastern.edu"
  },
  {
    "name": "Clara Bay",
    "affiliation": "MOBS Lab, Northeastern University",
    "email": "bay.c@northeastern.edu"
  },
  {
    "name": "Stefania Fiandrino",
    "affiliation": "Sapienza, University of Rome, ISI Foundation",
    "email": "stefania.fiandrino@isi.it"
  },
  {
    "name": "Nicolò Gozzi",
    "affiliation": "ISI Foundation",
    "email": "nicolo.gozzi@isi.it"
  },
  {
    "name": "Sara Venturini",
    "affiliation": "MOBS Lab, Northeastern University",
    "email": "s.venturini@northeastern.edu"
  },
  {
    "name": "Alessandra Urbinati",
    "affiliation": "MOBS Lab, Northeastern University",
    "email": "a.urbinati@northeastern.edu"
  }
]

website_url: "https://www.mobs-lab.org/"
license: "CC-BY_SA-4.0"
designated_model: true
citation: " "
team_funding: "We acknowledge support from cooperative agreement CDC-RFA-FT-23-0069 from the CDC’s Center for Forecasting and Outbreak Analytics."
methods: "Tabular Q-learning with trajectory selection for flu hospitalization forecasting."
data_inputs: "Weekly incident flu hospitalizations from HHS"
methods_long: "This model uses tabular Q-learning to forecast flu hospitalizations by learning to select relevant epidemic trajectories generated by the GLEAM model. The system operates over a 256-action space, where each action corresponds to selecting a specific GLEAM-simulated trajectory. The state representation combines two epidemic features: trend angle and trend magnitude. Through reinforcement learning, the agent learns an optimal policy for selecting which GLEAM trajectories provide the best forecasts given the current epidemic context. The selected trajectories are then used to generate probabilistic forecasts of future hospitalizations."
ensemble_of_models: false
ensemble_of_hub_models: false
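
The methods_long entry above describes tabular Q-learning over a 256-action space (one action per candidate GLEAM trajectory), with a discrete state built from the trend angle and magnitude of recent hospitalizations. The Python sketch below shows how such a setup could be wired together; the 8x8 state discretization, the 4-week window and horizon, the negative-MAE reward, the hyperparameters, and all function and variable names are illustrative assumptions and are not taken from the submission.

import numpy as np

# Minimal sketch of the tabular Q-learning setup described in methods_long.
# Discretization, reward, and hyperparameters below are illustrative, not
# values from the MOBS GLEAM_RL_FLUH model.

N_TRAJECTORIES = 256          # one action per GLEAM-simulated trajectory
N_ANGLE_BINS = 8              # discretized trend angle of recent hospitalizations
N_MAG_BINS = 8                # discretized trend magnitude
ALPHA, GAMMA, EPSILON = 0.1, 0.95, 0.1

# Q-table: one row per discrete state, one column per candidate trajectory.
Q = np.zeros((N_ANGLE_BINS * N_MAG_BINS, N_TRAJECTORIES))


def encode_state(recent_obs, angle_edges, mag_edges):
    """Map the recent observed hospitalization curve to a discrete state index."""
    slope = np.polyfit(np.arange(len(recent_obs)), recent_obs, 1)[0]
    angle_bin = np.clip(np.digitize(np.arctan(slope), angle_edges), 0, N_ANGLE_BINS - 1)
    mag_bin = np.clip(np.digitize(recent_obs[-1], mag_edges), 0, N_MAG_BINS - 1)
    return angle_bin * N_MAG_BINS + mag_bin


def select_action(state, rng):
    """Epsilon-greedy choice of which GLEAM trajectory to use for the forecast."""
    if rng.random() < EPSILON:
        return int(rng.integers(N_TRAJECTORIES))
    return int(np.argmax(Q[state]))


def q_update(state, action, reward, next_state):
    """Standard tabular Q-learning update."""
    td_target = reward + GAMMA * np.max(Q[next_state])
    Q[state, action] += ALPHA * (td_target - Q[state, action])


def train_episode(observed, trajectories, angle_edges, mag_edges,
                  window=4, horizon=4, rng=None):
    """One pass over an observed season: reward is the negative MAE between
    the selected trajectory and realized hospitalizations over the horizon."""
    if rng is None:
        rng = np.random.default_rng(0)
    for t in range(window, len(observed) - horizon):
        state = encode_state(observed[t - window:t], angle_edges, mag_edges)
        action = select_action(state, rng)
        forecast = trajectories[action, t:t + horizon]
        reward = -np.mean(np.abs(forecast - observed[t:t + horizon]))
        next_state = encode_state(observed[t - window + 1:t + 1], angle_edges, mag_edges)
        q_update(state, action, reward, next_state)

At forecast time, the greedy action (or the top-ranked actions) for the current state would identify the GLEAM trajectories from which probabilistic forecasts are assembled, in the spirit of the trajectory-selection procedure described in methods_long.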
