
Commit 8431987

add logging (#55)
1 parent e6c1996 commit 8431987

File tree

2 files changed: +11 -2 lines changed


README.md

Lines changed: 4 additions & 0 deletions
````diff
@@ -57,6 +57,10 @@ Note that the centml backend compiler is non-blocking. This means it that until
 
 Again, make sure your script's environment sets `CENTML_SERVER_URL` to communicate with the desired server.
 
+To see logs, add this to your script before triggering compilation:
+```python
+logging.basicConfig(level=logging.INFO)
+```
 
 ### Tests
 To run tests, first install required packages:
````
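
The snippet added to the README assumes `logging` has already been imported. As a rough end-to-end sketch (the `"centml"` backend string, the toy model, and the server URL below are illustrative assumptions, not taken from this commit), enabling the new logs could look like:

```python
import logging
import os

import torch

# Configure logging before compilation is triggered so INFO records are emitted.
logging.basicConfig(level=logging.INFO)

# Point the client at the desired compilation server (see the README context above).
os.environ["CENTML_SERVER_URL"] = "http://localhost:8090"  # placeholder URL

model = torch.nn.Linear(8, 8)                      # illustrative model
compiled = torch.compile(model, backend="centml")  # assumed backend name
compiled(torch.randn(1, 8))  # first call kicks off the (non-blocking) remote compilation
```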

centml/compiler/backend.py

Lines changed: 7 additions & 2 deletions
```diff
@@ -74,7 +74,9 @@ def _get_model_id(self) -> str:
         for block in iter(lambda: serialized_model_file.read(config_instance.HASH_CHUNK_SIZE), b""):
             sha_hash.update(block)
 
-        return sha_hash.hexdigest()
+        model_id = sha_hash.hexdigest()
+        logging.info(f"Model has id {model_id}")
+        return model_id
 
     def _download_model(self, model_id: str):
         download_response = requests.get(
```
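
The hunk above names the digest as `model_id` so it can be logged before being returned. A standalone sketch of the same chunked-hashing pattern, assuming `sha256`, a made-up chunk size, and a file path in place of the class's own state:

```python
import hashlib
import logging

HASH_CHUNK_SIZE = 1024 * 1024  # stand-in for config_instance.HASH_CHUNK_SIZE

def model_id_from_file(path: str) -> str:
    # Hash the serialized model in fixed-size chunks so large files never
    # have to be read into memory at once.
    sha_hash = hashlib.sha256()  # assumed algorithm
    with open(path, "rb") as serialized_model_file:
        for block in iter(lambda: serialized_model_file.read(HASH_CHUNK_SIZE), b""):
            sha_hash.update(block)
    model_id = sha_hash.hexdigest()
    logging.info(f"Model has id {model_id}")
    return model_id
```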
```diff
@@ -123,7 +125,6 @@ def _wait_for_status(self, model_id: str) -> bool:
             raise Exception(
                 f"Status check: request failed, exception from server:\n{status_response.json().get('detail')}"
             )
-
         status = status_response.json().get("status")
 
         if status == CompilationStatus.DONE.value:
```
```diff
@@ -132,6 +133,7 @@
                 pass
             elif status == CompilationStatus.NOT_FOUND.value:
                 tries += 1
+                logging.info("Submitting model to server for compilation.")
                 self._compile_model(model_id)
             else:
                 tries += 1
```
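
Taken together, the two hunks above show where the new log line sits in `_wait_for_status`: when the server reports the model as not found, the client logs the submission and calls `_compile_model`. A condensed, hedged sketch of that polling-and-resubmit pattern (the endpoint, status strings, retry budget, and sleep interval are illustrative, not the repository's actual values):

```python
import logging
import time

import requests

MAX_TRIES = 10  # illustrative retry budget

def wait_for_status(status_url: str, submit_compilation) -> bool:
    tries = 0
    while tries < MAX_TRIES:
        status_response = requests.get(status_url)
        if not status_response.ok:
            raise Exception(
                f"Status check: request failed, exception from server:\n{status_response.json().get('detail')}"
            )
        status = status_response.json().get("status")
        if status == "DONE":        # stand-in for CompilationStatus.DONE.value
            return True
        if status == "NOT_FOUND":   # stand-in for CompilationStatus.NOT_FOUND.value
            tries += 1
            logging.info("Submitting model to server for compilation.")
            submit_compilation()
        # Any other status: keep polling while compilation is still in progress.
        time.sleep(1)  # illustrative poll interval
    return False
```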
```diff
@@ -149,13 +151,16 @@ def remote_compilation(self):
         # check if compiled forward is saved locally
         compiled_forward_path = get_backend_compiled_forward_path(model_id)
         if os.path.isfile(compiled_forward_path):
+            logging.info("Compiled model found in local cache. Not submitting to server.")
             compiled_forward = torch.load(compiled_forward_path)
         else:
             self._wait_for_status(model_id)
             compiled_forward = self._download_model(model_id)
 
         self.compiled_forward_function = compiled_forward
 
+        logging.info("Compilation successful.")
+
         # Let garbage collector free the memory used by the uncompiled model
         with self.lock:
             del self.inputs
```
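
The final hunk brackets `remote_compilation`'s cache-first flow with log lines. A minimal sketch of that flow, with hypothetical helper parameters (`cache_path`, `wait_for_status`, `download_model`) standing in for the class's own attributes and methods:

```python
import logging
import os

import torch

def load_or_fetch_compiled_forward(cache_path, model_id, wait_for_status, download_model):
    """Prefer a locally cached compiled forward; otherwise wait for the server and download it."""
    if os.path.isfile(cache_path):
        logging.info("Compiled model found in local cache. Not submitting to server.")
        compiled_forward = torch.load(cache_path)
    else:
        wait_for_status(model_id)
        compiled_forward = download_model(model_id)

    logging.info("Compilation successful.")
    return compiled_forward
```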
