Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 7 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,4 +1,11 @@
## 0.148.1 (2025-04-22)

### Fix

- added bank holiday offsets.

## 0.148.0 (2025-04-19)

### Feat
Expand Down
2 changes: 1 addition & 1 deletion custom_components/uk_bin_collection/config_flow.py
Original file line number Diff line number Diff line change
Expand Up @@ -253,7 +253,7 @@ async def async_step_reconfigure_confirm(

async def get_councils_json(self) -> Dict[str, Any]:
"""Fetch and return the supported councils data, including aliases and sorted alphabetically."""
url = "https://raw.githubusercontent.com/robbrad/UKBinCollectionData/0.148.0/uk_bin_collection/tests/input.json"
url = "https://raw.githubusercontent.com/robbrad/UKBinCollectionData/0.148.1/uk_bin_collection/tests/input.json"
try:
async with aiohttp.ClientSession() as session:
async with session.get(url) as response:
Expand Down
4 changes: 2 additions & 2 deletions custom_components/uk_bin_collection/manifest.json
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@
"integration_type": "service",
"iot_class": "cloud_polling",
"issue_tracker": "https://github.com/robbrad/UKBinCollectionData/issues",
"requirements": ["uk-bin-collection>=0.148.0"],
"version": "0.148.0",
"requirements": ["uk-bin-collection>=0.148.1"],
"version": "0.148.1",
"zeroconf": []
}
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[tool.poetry]
name = "uk_bin_collection"
version = "0.148.0"
version = "0.148.1"
description = "Python Lib to collect UK Bin Data"
readme = "README.md"
authors = ["Robert Bradley <robbrad182@gmail.com>"]
Expand Down
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
# import re

import requests
from bs4 import BeautifulSoup
from dateutil.parser import parse

from uk_bin_collection.uk_bin_collection.common import (
check_postcode,
Expand Down Expand Up @@ -232,11 +232,46 @@ def parse_data(self, page: str, **kwargs: str) -> dict[str, list[dict[str, str]]
)
garden_dates: list[str] = get_dates_every_x_days(week[garden_week], 14, 28)

# Build a dictionary of bank holiday changes
bank_holiday_bins_url = "https://www.cheltenham.gov.uk/bank-holiday-collections"
response = requests.get(bank_holiday_bins_url)
soup = BeautifulSoup(response.content, "html.parser")
response.close()
tables = soup.find_all("table")

# Build a dictionary to modify any bank holiday collections
bh_dict = {}
for table in tables:
# extract table body
for row in table.find_all("tr")[1:]:
if row.find_all("td")[1].text.strip() == "Normal collection day":
bh_dict[
parse(
row.find_all("td")[0].text.strip(),
dayfirst=True,
fuzzy=True,
).date()
] = parse(
row.find_all("td")[0].text.strip(), dayfirst=True, fuzzy=True
).date()
else:
bh_dict[
parse(
row.find_all("td")[0].text.strip(),
dayfirst=True,
fuzzy=True,
).date()
] = parse(
row.find_all("td")[1].text.strip(), dayfirst=True, fuzzy=True
).date()

for refuse_date in refuse_dates:
collection_date = (
datetime.strptime(refuse_date, "%d/%m/%Y")
+ timedelta(days=refuse_day_offset)
).strftime("%d/%m/%Y")
collection_date = datetime.strptime(refuse_date, "%d/%m/%Y") + timedelta(
days=refuse_day_offset
)
if collection_date in bh_dict:
collection_date = bh_dict[collection_date]
collection_date = collection_date.strftime("%d/%m/%Y")

dict_data = {
"type": "Refuse Bin",
Expand All @@ -246,10 +281,12 @@ def parse_data(self, page: str, **kwargs: str) -> dict[str, list[dict[str, str]]

for recycling_date in recycling_dates:

collection_date = (
datetime.strptime(recycling_date, "%d/%m/%Y")
+ timedelta(days=recycling_day_offset)
).strftime("%d/%m/%Y")
collection_date = datetime.strptime(recycling_date, "%d/%m/%Y") + timedelta(
days=recycling_day_offset
)
if collection_date in bh_dict:
collection_date = bh_dict[collection_date]
collection_date = collection_date.strftime("%d/%m/%Y")

dict_data = {
"type": "Recycling Bin",
Expand All @@ -259,10 +296,12 @@ def parse_data(self, page: str, **kwargs: str) -> dict[str, list[dict[str, str]]

for garden_date in garden_dates:

collection_date = (
datetime.strptime(garden_date, "%d/%m/%Y")
+ timedelta(days=garden_day_offset)
).strftime("%d/%m/%Y")
collection_date = datetime.strptime(garden_date, "%d/%m/%Y") + timedelta(
days=garden_day_offset
)
if collection_date in bh_dict:
collection_date = bh_dict[collection_date]
collection_date = collection_date.strftime("%d/%m/%Y")

dict_data = {
"type": "Garden Waste Bin",
Expand All @@ -279,10 +318,12 @@ def parse_data(self, page: str, **kwargs: str) -> dict[str, list[dict[str, str]]

for food_date in food_dates:

collection_date = (
datetime.strptime(food_date, "%d/%m/%Y")
+ timedelta(days=food_day_offset)
).strftime("%d/%m/%Y")
collection_date = datetime.strptime(food_date, "%d/%m/%Y") + timedelta(
days=food_day_offset
)
if collection_date in bh_dict:
collection_date = bh_dict[collection_date]
collection_date = collection_date.strftime("%d/%m/%Y")

dict_data = {
"type": "Food Waste Bin",
Expand Down Expand Up @@ -313,10 +354,12 @@ def parse_data(self, page: str, **kwargs: str) -> dict[str, list[dict[str, str]]

for food_date in food_dates_first:

collection_date = (
datetime.strptime(food_date, "%d/%m/%Y")
+ timedelta(days=food_day_offset)
).strftime("%d/%m/%Y")
collection_date = datetime.strptime(food_date, "%d/%m/%Y") + timedelta(
days=food_day_offset
)
if collection_date in bh_dict:
collection_date = bh_dict[collection_date]
collection_date = collection_date.strftime("%d/%m/%Y")

dict_data = {
"type": "Food Waste Bin",
Expand All @@ -325,10 +368,12 @@ def parse_data(self, page: str, **kwargs: str) -> dict[str, list[dict[str, str]]
bindata["bins"].append(dict_data)
for food_date in food_dates_second:

collection_date = (
datetime.strptime(food_date, "%d/%m/%Y")
+ timedelta(days=second_week_offset)
).strftime("%d/%m/%Y")
collection_date = datetime.strptime(food_date, "%d/%m/%Y") + timedelta(
days=second_week_offset
)
if collection_date in bh_dict:
collection_date = bh_dict[collection_date]
collection_date = collection_date.strftime("%d/%m/%Y")

dict_data = {
"type": "Food Waste Bin",
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@ def parse_data(self, page: str, **kwargs) -> dict:
collections = []

# Find section with bins in
table = soup.find_all("table", {"class": "hbcRounds"})[1]
table = soup.find_all("table", {"class": "hbcRounds"})[0]

# For each bin section, get the text and the list elements
for row in table.find_all("tr"):
Expand Down
Loading