Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 6 additions & 3 deletions examples/lifelong_learning/cityscapes/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -57,9 +57,6 @@ job_name=robo-demo
Before this, users must obtain the S3_ENDPOINT, ACCESS_KEY_ID and SECRET_ACCESS_KEY of their own S3 accounts
and export them as environment variables of the same names.
```
S3_ENDPOINT=obs.cn-north-1.myhuaweicloud.com
ACCESS_KEY_ID=8CH9JVNJF6TGK2IAYUDR

action=${1:-create}

kubectl $action -f - <<EOF
Expand Down Expand Up @@ -119,6 +116,8 @@ spec:
env:
- name: "train_ratio"
value: "0.9"
- name: "gpu_ids"
value: "0"
resources:
limits:
cpu: 6
Expand Down Expand Up @@ -158,6 +157,8 @@ spec:
value: "<"
- name: "model_threshold" # Threshold for filtering deploy models
value: "0"
- name: "gpu_ids"
value: "0"
resources:
limits:
cpu: 6
Expand All @@ -179,6 +180,8 @@ spec:
# args: ["while true; do sleep 2000; done;"]
args: ["predict.py"]
env:
- name: "gpu_ids"
value: "0"
- name: "test_data"
value: "/home/data/test_data"
- name: "unseen_save_url"
Expand Down
9 changes: 5 additions & 4 deletions examples/lifelong_learning/cityscapes/RFNet/interface.py
Original file line number Diff line number Diff line change
Expand Up @@ -181,13 +181,14 @@ def save_predicted_image(img_url, image, pred, image_name):
Sample post processing function invoked by Sedna
to upload the inference results
'''

raw_image_name = os.path.join(img_url, image_name)
image.save(raw_image_name)
print(f"raw img is {image}")

merge_label_name = os.path.join(img_url, f"merge_{image_name}")
color_label_name = os.path.join(img_url, f"color_{image_name}")
label_name = os.path.join(img_url, f"label_{image_name}")
os.makedirs(os.path.dirname(merge_label_name), exist_ok=True)
os.makedirs(os.path.dirname(color_label_name), exist_ok=True)
os.makedirs(os.path.dirname(label_name), exist_ok=True)

# Save prediction images
pred = torch.from_numpy(pred).byte()
Expand All @@ -200,7 +201,7 @@ def save_predicted_image(img_url, image, pred, image_name):
pre_label_image = ToPILImage()(pre_label)
pre_label_image.save(label_name)

return (merge_label_name, color_label_name, label_name)
return (raw_image_name, merge_label_name, color_label_name, label_name)


def image_merge(image, label, save_name):
Expand Down
44 changes: 31 additions & 13 deletions examples/lifelong_learning/cityscapes/RFNet/predict.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,22 +2,23 @@
import os
# os.environ["MODEL_URLS"] = "s3://kubeedge/sedna-robo/kb/index.pkl"
# os.environ["KB_SERVER"] = "http://127.0.0.1:9020"
# os.environ["test_data"] = "/data/test_data"
# os.environ["test_data"] = "/home/lsq/RFNet/e1_labeled_0/2048x1024/rgb/test/20220420_front"
# os.environ["unseen_save_url"] = "s3://kubeedge/sedna-robo/unseen_samples/"
# os.environ["seen_save_url"] = "s3://kubeedge/sedna-robo/seen_samples/"
# os.environ["metadata_url"] = "s3://kubeedge/sedna-robo/metadata/"
# os.environ["OUTPUT_URL"] = "s3://kubeedge/sedna-robo/"

# os.environ["OOD_thresh"] = "0.45"
# os.environ["OOD_thresh"] = "0.5"
# os.environ["OOD_model"] = "https://kubeedge.obs.cn-north-1.myhuaweicloud.com/sedna-robo/models/garden.model"
# os.environ["ramp_detection"] = "https://kubeedge.obs.cn-north-1.myhuaweicloud.com/sedna-robo/models/garden.pth"


import numpy as np
from PIL import Image
from sedna.datasources import BaseDataSource
from sedna.core.lifelong_learning import LifelongLearning
from sedna.common.config import Context
from sedna.common.log import LOGGER
from sedna.common.file_ops import FileOps

from interface import Estimator, preprocess_frames, save_predicted_image

Expand All @@ -38,13 +39,30 @@ def postprocess(samples):
return image_names, imgs


def unseen_sample_postprocess(img_url, img, ood_pred, img_name):
    """Save an unseen sample locally and, when an OOD mask is available,
    also save a copy with the out-of-distribution pixels highlighted in red.

    Parameters:
        img_url: directory to save into (rebound to the full file path below).
        img: a PIL image object, or a path string to open with PIL.
        ood_pred: sequence of OOD prediction masks; ood_pred[0] is assumed to
            be a per-pixel mask where 1 marks OOD pixels — TODO confirm shape
            matches the image size.
        img_name: file name for the raw saved image.

    Returns:
        (saved_image_path, overlay_image_path); overlay_image_path is None
        when ood_pred is empty.
    """
    if isinstance(img, str):
        img = Image.open(img)

    # BUG FIX: img_color_url was previously only bound inside the
    # `len(ood_pred) > 0` branch, so the final return raised
    # UnboundLocalError for samples without an OOD mask.
    img_color_url = None

    if len(ood_pred) > 0:
        img = img.convert("RGBA")

        # Fully transparent canvas; paint OOD pixels semi-transparent red.
        colored_img = np.array(Image.new("RGBA", img.size, (255, 255, 255, 0)))
        changed_pixel = ood_pred[0]
        colored_img[changed_pixel == 1] = [255, 0, 0, 150]
        colored_img = Image.fromarray(colored_img)

        img_color = Image.alpha_composite(img, colored_img)
        img_color = img_color.convert("RGB")
        # Insert "_color" before the extension: foo.png -> foo_color.png
        img_color_name, ext = os.path.splitext(img_name)
        img_color_name = "{}_color{}".format(img_color_name, ext)
        img_color_url = os.path.join(img_url, img_color_name)
        img_color.save(img_color_url)

    img = img.convert("RGB")

    img_url = os.path.join(img_url, img_name)
    img.save(img_url)
    return img_url, img_color_url


def init_ll_job():
Expand Down Expand Up @@ -98,7 +116,7 @@ def predict():
test_data_num = len(test_data)
count = 0

# simulate a permenant inference service
# Simulate a pinned inference service
LOGGER.info(f"Inference service starts.")
while True:
for i, data in enumerate(test_data):
Expand All @@ -108,9 +126,9 @@ def predict():
img_rgb = Image.open(test_data_url).convert("RGB")
sample = {'image': img_rgb, "depth": img_rgb, "label": img_rgb}
predict_data = preprocess(sample)
prediction, is_unseen, _ = ll_job.inference(predict_data, \
seen_sample_postprocess=save_predicted_image,
unseen_sample_postprocess=unseen_sample_postprocess)
prediction, is_unseen, _ = ll_job.inference(predict_data,
seen_sample_postprocess=save_predicted_image,
unseen_sample_postprocess=unseen_sample_postprocess)
LOGGER.info(f"Image {i + count + 1} is unseen task: {is_unseen}")
LOGGER.info(
f"Image {i + count + 1} prediction result: {prediction}")
Expand Down
Empty file.
111 changes: 0 additions & 111 deletions examples/lifelong_learning/cityscapes/yaml/robot-dog-delivery.yaml

This file was deleted.

Original file line number Diff line number Diff line change
Expand Up @@ -146,14 +146,17 @@ def save_meta_estimators(self, meta_estimator_index):
return meta_estimators

def save_unseen_samples(self, samples, **kwargs):
    """Enqueue each unseen sample, paired with its OOD prediction and score,
    for asynchronous upload.

    Expects kwargs["unseen_params"] to be a pair
    (ood_predictions, ood_scores), both indexed in step with samples.x.
    Each queue entry is (sample_id, img, ood_prediction, ood_score).
    """
    unseen_params = kwargs.get("unseen_params")
    ood_predictions = unseen_params[0]
    ood_scores = unseen_params[1]

    for i, sample in enumerate(samples.x):
        # Fresh UUID identifies the sample in the metadata store.
        sample_id = str(uuid.uuid4())
        if isinstance(sample, dict):
            img = sample.get("image")
        else:
            img = sample[0]
        unseen_sample_info = (sample_id, img, ood_predictions[i],
                              ood_scores[i])
        self.unseen_sample_queue.put(unseen_sample_info)

def save_seen_samples(self, samples, results, **kwargs):
Expand Down Expand Up @@ -239,8 +242,8 @@ def __init__(self, unseen_sample_queue, **kwargs):
def run(self):
while True:
time.sleep(self.check_time)
sample_id, img, ood_score = self.unseen_sample_queue.get()
unseen_sample_url = self.upload_unseen_sample(img)
sample_id, img, ood_pred, ood_score = self.unseen_sample_queue.get()
unseen_sample_url = self.upload_unseen_sample(img, ood_pred)
self.upload_meta_data(sample_id, ood_score, unseen_sample_url)
LOGGER.info(f"Upload unseen sample to {unseen_sample_url}")
self.unseen_sample_queue.task_done()
Expand All @@ -257,8 +260,8 @@ def init_unseen_metadata_template(self):
}
return unseen_sample_metadata

def upload_unseen_sample(self, img):
if not self.post_process:
def upload_unseen_sample(self, img, ood_pred):
if not callable(self.post_process):
LOGGER.info("Unseen sample post processing is not callable.")
return

Expand All @@ -267,8 +270,12 @@ def upload_unseen_sample(self, img):
else:
image_name = os.path.basename(img)

local_save_url = self.post_process(
self.local_unseen_dir, img, image_name)
local_save_url, color_local_save_url = self.post_process(
self.local_unseen_dir, img, ood_pred, image_name)

FileOps.upload(color_local_save_url,
os.path.join(self.unseen_save_url,
os.path.basename(color_local_save_url)))
return FileOps.upload(local_save_url,
os.path.join(self.unseen_save_url, image_name))

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,7 @@ def __init__(self, task_index, **kwargs):
self.task_group_key = KBResourceConstant.TASK_GROUPS.value
self.extractor_key = KBResourceConstant.EXTRACTOR.value

self.base_model = kwargs.get("base_model") # (num_class=31)
self.base_model = kwargs.get("base_model")
self.backup_model = kwargs.get('OOD_backup_model')
if not self.backup_model:
self.seen_extractor = task_index.get(
Expand Down Expand Up @@ -116,6 +116,7 @@ def inference(self, samples, **kwargs):

tasks = []
OOD_scores = []
ood_predictions = []
for inx, df in enumerate(samples):
m = models[inx]
if not isinstance(m, Model):
Expand All @@ -126,18 +127,19 @@ def inference(self, samples, **kwargs):
evaluator.load(m.model)
else:
evaluator = m.model
InD_list, OoD_list, pred, ood_scores = self.ood_predict(
InD_list, OoD_list, pred, ood_pred, ood_scores = self.ood_predict(
evaluator, df.x, **kwargs)
seen_task_samples.x.extend(InD_list)
unseen_task_samples.x.extend(OoD_list)
OOD_scores.extend(ood_scores)
ood_predictions.extend(ood_pred)
task = Task(entry=m.entry, samples=df)
task.result = pred
task.model = m
tasks.append(task)
res = self._inference_integrate(tasks)
return (seen_task_samples, res, tasks), \
(unseen_task_samples, OOD_scores)
(unseen_task_samples, ood_predictions, OOD_scores)

def ood_predict(self, evaluator, samples, **kwargs):
data = self.preprocess_func(samples)
Expand All @@ -150,7 +152,7 @@ def ood_predict(self, evaluator, samples, **kwargs):
data_loader = evaluator.estimator.validator.test_loader

OoD_list, InD_list = [], []
predictions = []
predictions, ood_predictions = [], []
ood_scores = []

seg_model.eval()
Expand Down Expand Up @@ -202,11 +204,12 @@ def ood_predict(self, evaluator, samples, **kwargs):
if OOD_score > self.OOD_thresh:
OoD_list.append(samples[i])
ood_scores.append(OOD_score)
ood_predictions.append(OOD_pred_show[j])
else:
InD_list.append(samples[i])
predictions.append(pred.data.cpu().numpy())

return InD_list, OoD_list, predictions, ood_scores
return InD_list, OoD_list, predictions, ood_predictions, ood_scores

def train(self, **kwargs):
ood_data_path = os.path.join(BaseConfig.data_path_prefix, 'ood_data')
Expand Down