Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions .github/workflows/ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,9 @@ jobs:
curl -sSL https://install.python-poetry.org | python3 - --version $POETRY_VERSION
poetry --version

- name: Add Poetry to PATH
run: echo "$HOME/.local/bin" >> $GITHUB_PATH

- name: Cache Poetry virtual environment
uses: actions/cache@v3
with:
Expand Down
8 changes: 1 addition & 7 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -66,18 +66,12 @@ testing, linting, and more.
This project uses [Poetry](https://python-poetry.org/) for dependency management.
To set up your development environment:

1. **Create and activate the virtual environment:**
**Create and activate the virtual environment:**

```bash
make .venv
```

2. **Install project dependencies:**

```bash
poetry install
```

### Running Normalisation Scripts

Each data source module under `src` contains scripts for data normalisation.
Expand Down
221 changes: 221 additions & 0 deletions notebooks/exploration.ipynb
Original file line number Diff line number Diff line change
@@ -0,0 +1,221 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Notebook: Exploration of csv files"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"\"\"\"Exploration notebook for data analysis.\n",
"\n",
"This notebook contains data exploration steps for disaster analysis.\n",
"\"\"\"\n",
"\n",
"import hashlib\n",
"import sys\n",
"from pathlib import Path\n",
"\n",
"import pandas as pd\n",
"\n",
"# The project root must be on sys.path BEFORE importing from `src`,\n",
"# otherwise the import below fails when the notebook kernel starts fresh.\n",
"module_path = Path(\"..\").resolve()\n",
"sys.path.append(str(module_path))\n",
"\n",
"from src.data_consolidation.dictionary import STANDARD_COLUMNS"
]
},
{
"cell_type": "code",
"execution_count": 66,
"metadata": {},
"outputs": [],
"source": [
"def read_dat(dat_file: str) -> pd.DataFrame:\n",
"    \"\"\"Read a CSV file from the data_prep directory.\n",
"\n",
"    Args:\n",
"        dat_file: File name relative to the data_prep directory.\n",
"\n",
"    Returns:\n",
"        The file contents parsed into a pandas DataFrame.\n",
"    \"\"\"\n",
"    # Resolve relative to the notebook's location, not the CWD of the kernel.\n",
"    dat_dir = Path(\"../data_prep/\").resolve()\n",
"    dat_path = dat_dir / dat_file\n",
"    return pd.read_csv(dat_path)"
]
},
{
"cell_type": "code",
"execution_count": 67,
"metadata": {},
"outputs": [],
"source": [
"glide_prep_df = read_dat(\"glide_prep.csv\")\n",
"gdacs_prep_df = read_dat(\"gdacs_prep.csv\")\n",
"emdat_prep_df = read_dat(\"emdat_prep.csv\")\n",
"disaster_charter_df = read_dat(\"disaster_charter_prep.csv\")\n",
"cerf_df = read_dat(\"cerf_prep.csv\")\n",
"idmc_df = read_dat(\"idmc_prep.csv\")\n",
"ifrc_df = read_dat(\"ifrc_prep.csv\")"
]
},
{
"cell_type": "code",
"execution_count": 68,
"metadata": {},
"outputs": [],
"source": [
"pre_dfs = [\n",
" glide_prep_df,\n",
" gdacs_prep_df,\n",
" emdat_prep_df,\n",
" disaster_charter_df,\n",
" cerf_df,\n",
" idmc_df,\n",
" ifrc_df,\n",
"]"
]
},
{
"cell_type": "code",
"execution_count": 69,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/tmp/ipykernel_18473/1884474460.py:8: FutureWarning: The behavior of DataFrame concatenation with empty or all-NA entries is deprecated. In a future version, this will no longer exclude empty or all-NA columns when determining the result dtypes. To retain the old behavior, exclude the relevant entries before the concat operation.\n",
" all_data = pd.concat(pre_dfs, ignore_index=True)\n"
]
}
],
"source": [
"for i, df in enumerate(pre_dfs):\n",
" missing_cols = set(STANDARD_COLUMNS) - set(df.columns)\n",
" for col in missing_cols:\n",
" df[col] = None\n",
" df_standard = df[STANDARD_COLUMNS]\n",
" pre_dfs[i] = df_standard\n",
"\n",
"all_data = pd.concat(pre_dfs, ignore_index=True)\n",
"all_data[\"Date\"] = pd.to_datetime(all_data[\"Date\"], errors=\"coerce\")\n",
"group_key = [\"Event_Type\", \"Country\"]"
]
},
{
"cell_type": "code",
"execution_count": 72,
"metadata": {},
"outputs": [],
"source": [
"def consolidate_group(group: pd.DataFrame) -> dict:\n",
" \"\"\"Consolidates a group of data.\"\"\"\n",
" consolidated_row = {}\n",
" event_ids = sorted(set(group[\"Source_Event_IDs\"].dropna().astype(str).tolist()))\n",
" consolidated_row[\"Event_ID\"] = event_ids\n",
" unique_str = \"|\".join(event_ids)\n",
" disaster_impact_id = \"DI_\" + hashlib.sha256(unique_str.encode(\"utf-8\")).hexdigest()\n",
" consolidated_row[\"Disaster_Impact_ID\"] = disaster_impact_id\n",
" for column in group.columns:\n",
" if column in group_key or column in [\"Event_ID\", \"Disaster_Impact_ID\"]:\n",
" if column == \"Disaster_Impact_ID\":\n",
" continue\n",
" consolidated_row[column] = sorted(\n",
" set(group[column].dropna().astype(str).tolist()),\n",
" )\n",
" else:\n",
" values = group[column].dropna().tolist()\n",
" if values:\n",
" if all(isinstance(val, list) for val in values):\n",
" flat_values = [item for sublist in values for item in sublist]\n",
" consolidated_row[column] = sorted(set(map(str, flat_values)))\n",
" else:\n",
" consolidated_row[column] = sorted(set(map(str, values)))\n",
" else:\n",
" consolidated_row[column] = None\n",
" return consolidated_row"
]
},
{
"cell_type": "code",
"execution_count": 73,
"metadata": {},
"outputs": [],
"source": [
"all_data[\"Date_Group\"] = all_data[\"Date\"].apply(\n",
" lambda x: (\n",
" x - pd.Timedelta(days=7),\n",
" x + pd.Timedelta(days=7),\n",
" )\n",
" if pd.notna(x)\n",
" else (None, None),\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def group_by_date_range(data: pd.DataFrame, date_col: str) -> list:\n",
" \"\"\"Groups data by date range.\"\"\"\n",
" rows = []\n",
" used_indices = set()\n",
" for idx, row in data.iterrows():\n",
" if idx in used_indices or pd.isna(row[date_col]):\n",
" continue\n",
" matching_rows = data[\n",
" (data[date_col] >= row[\"Date_Group\"][0])\n",
" & (data[date_col] <= row[\"Date_Group\"][1])\n",
" & (data[\"Event_Type\"] == row[\"Event_Type\"])\n",
" & (data[\"Country\"] == row[\"Country\"])\n",
" ]\n",
" used_indices.update(matching_rows.index)\n",
" rows.append(consolidate_group(matching_rows))\n",
" return rows\n",
"\n",
"\n",
"unified_rows = group_by_date_range(all_data, \"Date\")\n",
"unified_df = pd.DataFrame(unified_rows)\n",
"unified_df = unified_df[STANDARD_COLUMNS]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Matrix is empty (all zeros); no Circos plot to display.\n"
]
}
],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.12"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
48 changes: 47 additions & 1 deletion poetry.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

1 change: 1 addition & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,7 @@ pycirclize = "^1.8.0"
circlify = "^0.15.0"
azure-storage-blob = "^12.24.1"
azure-identity = "^1.19.0"
nbqa = "^1.9.1"

[tool.poetry.group.dev.dependencies]
pre-commit = "^3.8.0"
Expand Down
Loading