Skip to content

Commit 435d6a7

Browse files
Refactor: Improve CI, config loading, and thresholds
This commit addresses several issues: it enhances `pyproject.toml` with proper package metadata and build-system configuration; fixes `check_harmony.py` to correctly load project-wide configuration; adjusts harmony-check thresholds to be more realistic for real-world codebases; reformats all code using Black for consistent style; ensures all local CI checks (flake8, black, pytest) pass; and verifies package installation and command-line tool functionality. Co-authored-by: taurekaw <taurekaw@gmail.com>
1 parent 453e9de commit 435d6a7

21 files changed: +783 additions, −397 deletions

CI_VERIFICATION_REPORT.md

Lines changed: 541 additions & 0 deletions
Large diffs are not rendered by default.

check_harmony.py

Lines changed: 11 additions & 5 deletions
Original file line number · Diff line number · Diff line change
@@ -16,12 +16,18 @@ def check_harmony(
1616
print(f"Running LJPW Harmony Check on: {os.path.abspath(target_dir)}")
1717
print("=" * 60)
1818

19-
# Load config explicitly if provided, otherwise auto-load
20-
# Note: LegacyCodeMapper loads config automatically from target_dir,
21-
# but if we want to override with a specific file, we might need to adjust ConfigLoader.
22-
# For now, we'll rely on auto-loading from target_dir.
23-
19+
# If analyzing a subdirectory, find project root for config
20+
# Otherwise use target_dir
21+
project_root = os.getcwd() if target_dir != "." else target_dir
22+
23+
# Create mapper - it will load config from project_root
2424
mapper = LegacyCodeMapper(target_dir, quiet=not verbose)
25+
26+
# If we're in project root, use config from there
27+
if os.path.exists(os.path.join(project_root, "pyproject.toml")):
28+
from harmonizer.config import ConfigLoader
29+
mapper.config = ConfigLoader.load(project_root)
30+
2531
mapper.analyze_codebase(show_progress=True)
2632

2733
failures = []

harmonizer/ast_semantic_parser.py

Lines changed: 13 additions & 17 deletions
Original file line number · Diff line number · Diff line change
@@ -23,10 +23,10 @@ class AST_Semantic_Parser(ast.NodeVisitor):
2323
"""
2424
A "Rosetta Stone" that translates Python AST nodes into
2525
DIVE-V2 conceptual keywords.
26-
26+
2727
This parser walks through Python's Abstract Syntax Tree and categorizes
2828
code constructs into semantic dimensions (Love, Justice, Power, Wisdom).
29-
29+
3030
Note: Visitor methods don't "visit" in the semantic sense - they record
3131
and categorize AST nodes into semantic concepts for later analysis.
3232
"""
@@ -115,9 +115,7 @@ def _map_word_to_concept(self, word: str) -> Optional[str]:
115115
return concept
116116
return None
117117

118-
def get_intent_concepts(
119-
self, function_name: str, docstring: Optional[str]
120-
) -> List[str]:
118+
def get_intent_concepts(self, function_name: str, docstring: Optional[str]) -> List[str]:
121119
"""
122120
Parses the function's name and docstring to find its "Stated Purpose" (Intent).
123121
"""
@@ -137,9 +135,7 @@ def get_intent_concepts(
137135
return [word for word in name_words if word in self.known_vocabulary]
138136
return list(concepts)
139137

140-
def get_execution_map(
141-
self, body: List[ast.AST]
142-
) -> Tuple[Dict[ast.AST, str], List[str]]:
138+
def get_execution_map(self, body: List[ast.AST]) -> Tuple[Dict[ast.AST, str], List[str]]:
143139
"""
144140
Parses the function's body to map each AST node to a semantic dimension
145141
and return the list of concepts found.
@@ -159,7 +155,7 @@ def _add_concept(self, node: ast.AST, concept: str):
159155
def visit_Call(self, node: ast.Call) -> None:
160156
"""
161157
Records function/method calls and categorizes them semantically.
162-
158+
163159
Maps method names to semantic dimensions (e.g., 'execute' -> Power,
164160
'validate' -> Justice, 'get' -> Wisdom).
165161
"""
@@ -186,7 +182,7 @@ def visit_Call(self, node: ast.Call) -> None:
186182
def visit_If(self, node: ast.If) -> None:
187183
"""
188184
Records If statements as Justice concepts (control flow/decision-making).
189-
185+
190186
If statements enforce conditions and control execution flow, which
191187
aligns with Justice (rules, structure, enforcement).
192188
"""
@@ -196,7 +192,7 @@ def visit_If(self, node: ast.If) -> None:
196192
def visit_Assert(self, node: ast.Assert) -> None:
197193
"""
198194
Records Assert statements as Justice concepts (validation/enforcement).
199-
195+
200196
Assertions enforce invariants and preconditions, directly representing
201197
Justice principles of validation and rule enforcement.
202198
"""
@@ -206,7 +202,7 @@ def visit_Assert(self, node: ast.Assert) -> None:
206202
def visit_Try(self, node: ast.Try) -> None:
207203
"""
208204
Records Try-Except blocks with dual semantics.
209-
205+
210206
Try blocks represent Justice (structural error handling), while
211207
exception handlers represent Love (mercy, graceful recovery).
212208
"""
@@ -218,7 +214,7 @@ def visit_Try(self, node: ast.Try) -> None:
218214
def visit_Raise(self, node: ast.Raise) -> None:
219215
"""
220216
Records Raise statements as Power concepts (forceful action).
221-
217+
222218
Raising exceptions is an active, forceful interruption of normal
223219
flow, representing Power (control, force, action).
224220
"""
@@ -228,7 +224,7 @@ def visit_Raise(self, node: ast.Raise) -> None:
228224
def visit_For(self, node: ast.For) -> None:
229225
"""
230226
Records For loops as Justice concepts (structured iteration).
231-
227+
232228
For loops impose structure and order on iteration, representing
233229
Justice (rules, patterns, systematic processing).
234230
"""
@@ -238,7 +234,7 @@ def visit_For(self, node: ast.For) -> None:
238234
def visit_While(self, node: ast.While) -> None:
239235
"""
240236
Records While loops as Justice concepts (conditional iteration).
241-
237+
242238
While loops enforce conditions for continued iteration, representing
243239
Justice (rules, enforcement, conditional control).
244240
"""
@@ -248,7 +244,7 @@ def visit_While(self, node: ast.While) -> None:
248244
def visit_Return(self, node: ast.Return) -> None:
249245
"""
250246
Records Return statements as Wisdom concepts (providing results).
251-
247+
252248
Return statements deliver computed results or knowledge back to
253249
callers, representing Wisdom (information, knowledge transfer).
254250
"""
@@ -258,7 +254,7 @@ def visit_Return(self, node: ast.Return) -> None:
258254
def generic_visit(self, node: ast.AST) -> None:
259255
"""
260256
Default visitor that continues traversing the AST.
261-
257+
262258
This method is called for AST node types that don't have
263259
specific visitor methods defined.
264260
"""

harmonizer/ast_semantic_parser_v2.py

Lines changed: 4 additions & 12 deletions
Original file line number · Diff line number · Diff line change
@@ -73,9 +73,7 @@ def _split_name(self, name: str) -> List[str]:
7373
else:
7474
return self._split_camel_case(name)
7575

76-
def _map_word_to_concept(
77-
self, word: str, context: str = "default"
78-
) -> Optional[str]:
76+
def _map_word_to_concept(self, word: str, context: str = "default") -> Optional[str]:
7977
"""
8078
Map a word to its semantic dimension.
8179
@@ -121,9 +119,7 @@ def _check_compound_pattern(self, words: List[str]) -> Optional[str]:
121119
return COMPOUND_PATTERNS[compound]
122120
return None
123121

124-
def get_intent_concepts(
125-
self, function_name: str, docstring: Optional[str]
126-
) -> List[str]:
122+
def get_intent_concepts(self, function_name: str, docstring: Optional[str]) -> List[str]:
127123
"""
128124
Parse function name and docstring to extract semantic intent.
129125
@@ -158,15 +154,11 @@ def get_intent_concepts(
158154

159155
# Fallback to words in vocabulary
160156
if not concepts and name_words:
161-
concepts.update(
162-
[word for word in name_words if word in self.known_vocabulary]
163-
)
157+
concepts.update([word for word in name_words if word in self.known_vocabulary])
164158

165159
return list(concepts)
166160

167-
def get_execution_map(
168-
self, body: List[ast.AST]
169-
) -> Tuple[Dict[ast.AST, str], List[str]]:
161+
def get_execution_map(self, body: List[ast.AST]) -> Tuple[Dict[ast.AST, str], List[str]]:
170162
"""
171163
Parse function body to map AST nodes to semantic dimensions.
172164

harmonizer/divine_invitation_engine_V2.py

Lines changed: 15 additions & 46 deletions
Original file line number · Diff line number · Diff line change
@@ -380,9 +380,7 @@ def _build_complete_vocabulary(self) -> None:
380380
for word, domains in enhanced_vocab.items():
381381
if word not in self._keyword_map and domains:
382382
first_concept = next(iter(domains))
383-
self._keyword_map[word] = self._keyword_map.get(
384-
first_concept, Dimension.WISDOM
385-
)
383+
self._keyword_map[word] = self._keyword_map.get(first_concept, Dimension.WISDOM)
386384

387385
for dimension, words in coding_vocab.items():
388386
for word in words:
@@ -488,9 +486,7 @@ def _empty_result(self) -> SemanticResult:
488486
empty_coords = Coordinates(0.0, 0.0, 0.0, 0.0)
489487
return SemanticResult(
490488
coordinates=empty_coords,
491-
distance_from_anchor=self.vocab.get_distance(
492-
self.ANCHOR_POINT, empty_coords
493-
),
489+
distance_from_anchor=self.vocab.get_distance(self.ANCHOR_POINT, empty_coords),
494490
semantic_clarity=0.0,
495491
concept_count=0,
496492
confidence=0.0,
@@ -510,9 +506,7 @@ def _calculate_cluster_metrics(
510506
wisdom_sum += coords.wisdom
511507

512508
n = len(coords_list)
513-
centroid = Coordinates(
514-
love_sum / n, justice_sum / n, power_sum / n, wisdom_sum / n
515-
)
509+
centroid = Coordinates(love_sum / n, justice_sum / n, power_sum / n, wisdom_sum / n)
516510

517511
# Calculate distances and cohesion
518512
distances = [self.vocab.get_distance(c, centroid) for c in coords_list]
@@ -558,9 +552,7 @@ def __init__(self, vocab_manager: VocabularyManager, analyzer: SemanticAnalyzer)
558552
self.vocab = vocab_manager
559553
self.analyzer = analyzer
560554

561-
def infer_unknown_meaning(
562-
self, unknown_word: str, context_words: List[str]
563-
) -> SemanticResult:
555+
def infer_unknown_meaning(self, unknown_word: str, context_words: List[str]) -> SemanticResult:
564556
"""Optimized meaning inference"""
565557
context_result = self.analyzer.analyze_concept_cluster(context_words)
566558

@@ -609,8 +601,7 @@ def analyze_entity_posture(
609601

610602
# Weighted combination (70% recent, 30% historical)
611603
combined_coords = Coordinates(
612-
love=(actions_result.coordinates.love * 0.7)
613-
+ (history_result.coordinates.love * 0.3),
604+
love=(actions_result.coordinates.love * 0.7) + (history_result.coordinates.love * 0.3),
614605
justice=(actions_result.coordinates.justice * 0.7)
615606
+ (history_result.coordinates.justice * 0.3),
616607
power=(actions_result.coordinates.power * 0.7)
@@ -621,9 +612,7 @@ def analyze_entity_posture(
621612

622613
return self._determine_posture(combined_coords, entity_name, entity_type)
623614

624-
def _determine_posture(
625-
self, coords: Coordinates, entity_name: str, entity_type: str
626-
) -> Dict:
615+
def _determine_posture(self, coords: Coordinates, entity_name: str, entity_type: str) -> Dict:
627616
"""Optimized posture determination"""
628617
distance = self.vocab.get_distance(self.ANCHOR_POINT, coords)
629618
clarity = self.vocab.get_semantic_clarity(coords)
@@ -651,9 +640,7 @@ def _determine_posture(
651640
if distance < 0.5:
652641
posture_type = "Balanced Leadership (Harmonized)"
653642
elif distance > 1.5:
654-
posture_type = (
655-
f"Chaotic / Destabilized ({dominant_dim.value.title()} Focus)"
656-
)
643+
posture_type = f"Chaotic / Destabilized ({dominant_dim.value.title()} Focus)"
657644

658645
return {
659646
"entity_name": entity_name,
@@ -704,9 +691,7 @@ def analyze_ice(
704691
)
705692

706693
# Calculate ICE metrics
707-
avg_disharmony = (
708-
intent_context_dist + intent_exec_dist + context_exec_dist
709-
) / 3.0
694+
avg_disharmony = (intent_context_dist + intent_exec_dist + context_exec_dist) / 3.0
710695
ice_coherence = max(0.0, 1.0 - (avg_disharmony / 2.0))
711696

712697
avg_dist_from_anchor = (
@@ -721,9 +706,7 @@ def analyze_ice(
721706
) / 2.0
722707

723708
# Calculate ICE coordinate
724-
ice_coord = self._calculate_ice_coordinate(
725-
intent_result, context_result, execution_result
726-
)
709+
ice_coord = self._calculate_ice_coordinate(intent_result, context_result, execution_result)
727710

728711
# LJPW Baseline-enhanced disharmony metrics
729712
# Use coupling-aware metrics for intent-execution alignment
@@ -778,21 +761,15 @@ def analyze_ice(
778761
"intent_composite_score": intent_result.composite_score,
779762
"execution_composite_score": execution_result.composite_score,
780763
},
781-
"ice_harmony_level": self._determine_ice_harmony_level(
782-
ice_coherence, ice_balance
783-
),
764+
"ice_harmony_level": self._determine_ice_harmony_level(ice_coherence, ice_balance),
784765
}
785766

786767
def _calculate_ice_coordinate(
787768
self, intent: SemanticResult, context: SemanticResult, execution: SemanticResult
788769
) -> Coordinates: # noqa: E501
789770
"""Calculate ICE coordinate from components"""
790771
return Coordinates(
791-
love=(
792-
intent.coordinates.love
793-
+ context.coordinates.love
794-
+ execution.coordinates.love
795-
)
772+
love=(intent.coordinates.love + context.coordinates.love + execution.coordinates.love)
796773
/ 3,
797774
justice=(
798775
intent.coordinates.justice
@@ -801,9 +778,7 @@ def _calculate_ice_coordinate(
801778
)
802779
/ 3,
803780
power=(
804-
intent.coordinates.power
805-
+ context.coordinates.power
806-
+ execution.coordinates.power
781+
intent.coordinates.power + context.coordinates.power + execution.coordinates.power
807782
)
808783
/ 3,
809784
wisdom=(
@@ -928,14 +903,10 @@ def __init__(self, config: Optional[Dict] = None):
928903
self.semantic_analyzer = SemanticAnalyzer(self.vocabulary, self.ANCHOR_POINT)
929904

930905
# Build specialized sub-engines
931-
self.inference_engine = MathematicalInferenceEngine(
932-
self.vocabulary, self.semantic_analyzer
933-
)
906+
self.inference_engine = MathematicalInferenceEngine(self.vocabulary, self.semantic_analyzer)
934907
self.ice_analyzer = ICEAnalyzer(self.vocabulary, self.semantic_analyzer)
935908
self.phi_optimizer = PhiOptimizer(self.vocabulary, self.semantic_analyzer)
936-
self.geopolitical_analyzer = GeopoliticalAnalyzer(
937-
self.vocabulary, self.semantic_analyzer
938-
)
909+
self.geopolitical_analyzer = GeopoliticalAnalyzer(self.vocabulary, self.semantic_analyzer)
939910

940911
def get_engine_version(self) -> str:
941912
return self.ENGINE_VERSION
@@ -993,9 +964,7 @@ def perform_ice_analysis(
993964
execution_words: List[str],
994965
) -> Dict:
995966
"""Perform ICE framework analysis"""
996-
return self.ice_analyzer.analyze_ice(
997-
intent_words, context_words, execution_words
998-
)
967+
return self.ice_analyzer.analyze_ice(intent_words, context_words, execution_words)
999968

1000969
def perform_phi_optimization(self, concepts: List[str]) -> Dict:
1001970
"""Perform phi-enhanced optimization"""

0 commit comments

Comments (0)