Use Cases
Practical applications of the Triadic Neurosymbolic Engine with code examples.
Explainable RAG
Instead of returning top-k by cosine score, return documents whose prime signatures subsume the query signature. Every result is provably relevant.
from neurosym import ContinuousEncoder, DiscreteMapper, DiscreteValidator

# Pipeline: continuous embeddings -> discrete prime signatures -> exact checks.
encoder = ContinuousEncoder("all-MiniLM-L6-v2")
mapper = DiscreteMapper(n_bits=8, projection="pca")
validator = DiscreteValidator()

# Index your documents.
docs = ["GDPR compliance guide", "Cookie consent policy", "Employee handbook"]
embeddings = encoder.encode(docs)
doc_primes = mapper.fit_transform(docs, embeddings)

# Encode the query through the same mapper (transform, not refit).
query = "consent"
q_emb = encoder.encode([query])
q_prime = mapper.transform([query], q_emb)

# Find documents that contain ALL features of the query.
# Subsumption is an exact check, not a similarity threshold, so every
# returned document is provably relevant.
query_signature = q_prime[query]  # hoist the dict lookup out of the loop
for doc, prime in doc_primes.items():
    if validator.subsumes(prime, query_signature):
        print(f" {doc}")
AI Model Auditing
Detect when two embedding models structure the same concept differently. Compare topological structure, not just similarity scores.
from neurosym import ContinuousEncoder, DiscreteMapper

# Encode the same concepts with two different embedding models.
encoder_a = ContinuousEncoder("all-MiniLM-L6-v2")
encoder_b = ContinuousEncoder("paraphrase-MiniLM-L3-v2")
mapper = DiscreteMapper(n_bits=8, projection="pca")

concepts = ["King", "Queen", "Animal", "Dog"]
# NOTE(review): assumes fit_transform refits the mapper on each call, so the
# two prime tables come from independent projections -- confirm in the API.
primes_a = mapper.fit_transform(concepts, encoder_a.encode(concepts))
primes_b = mapper.fit_transform(concepts, encoder_b.encode(concepts))

# Compare subsumption relationships across models: c1 subsumes c2 exactly
# when c2's prime signature divides c1's. A mismatch between the two models
# means they structure the same concepts differently.
for c1 in concepts:
    for c2 in concepts:
        if c1 == c2:
            continue  # a concept trivially subsumes itself
        sub_a = primes_a[c1] % primes_a[c2] == 0
        sub_b = primes_b[c1] % primes_b[c2] == 0
        if sub_a != sub_b:
            print(f" Discrepancy: {c1} -> {c2}")
Benchmark: the Engine found 108,694 discrepancies while auditing 2 million semantic chains across two embedding models.
Semantic Deduplication
Two records are semantic duplicates if one subsumes the other. Exact, not probabilistic.
from neurosym import ContinuousEncoder, DiscreteMapper, DiscreteValidator

encoder = ContinuousEncoder("all-MiniLM-L6-v2")
mapper = DiscreteMapper(n_bits=8, projection="pca")
validator = DiscreteValidator()

records = [
    "Machine learning engineer",
    "ML engineer",
    "Senior machine learning engineer",
]
embeddings = encoder.encode(records)
primes = mapper.fit_transform(records, embeddings)

# Find subsumption-based duplicates over every unordered pair.
# A pair is a duplicate when EITHER record subsumes the other, so check
# both directions (the original one-directional check misses half the pairs).
for i, r1 in enumerate(records):
    for r2 in records[i + 1:]:
        if validator.subsumes(primes[r1], primes[r2]):
            print(f" '{r1}' subsumes '{r2}'")
        elif validator.subsumes(primes[r2], primes[r1]):
            print(f" '{r2}' subsumes '{r1}'")
Compliance Validation
Verify that regulatory concepts maintain expected hierarchical relationships in your ontology.
from neurosym import ContinuousEncoder, DiscreteMapper, DiscreteValidator

encoder = ContinuousEncoder("all-MiniLM-L6-v2")
mapper = DiscreteMapper(n_bits=8, projection="pca")
validator = DiscreteValidator()

concepts = ["GDPR", "consent", "data subject rights", "privacy"]
embeddings = encoder.encode(concepts)
primes = mapper.fit_transform(concepts, embeddings)

# Check: does "GDPR" contain all features of "consent"?
# Raise explicitly rather than assert: asserts are stripped under `python -O`,
# which would silently disable a compliance gate.
if not validator.subsumes(primes["GDPR"], primes["consent"]):
    raise ValueError("GDPR does not subsume consent -- ontology gap detected")
Anomaly Detection
Tabular rows that break the multiplicative patterns of their peers are flagged as anomalies -- with a proof, not just a score.
from neurosym.anomaly import AnomalyDetector, RelationalRule
import pandas as pd

# Rows whose `total` breaks the multiplicative relation
# price * quantity == total are flagged -- with a proof, not just a score.
df = pd.DataFrame({
    "price": [100, 200, 300],
    "quantity": [2, 3, 4],
    "total": [200, 600, 1100],  # row 2 is wrong: 300*4 = 1200, not 1100
})

detector = AnomalyDetector()
rule = RelationalRule(
    factor_columns=["price", "quantity"],
    result_column="total",
)

anomalies = detector.detect(df, rules=[rule])
for a in anomalies:
    print(f" Row {a.row}: expected {a.expected}, got {a.actual}")
    print(f" Missing factor: {a.missing_factor}")