This document provides a comprehensive API reference for StillMe Core Framework.
Main validation orchestrator.
from stillme_core.validation import ValidationEngine
engine = ValidationEngine()
result = engine.validate(
question="What is AI?",
answer="AI is artificial intelligence...",
context_docs=[...],
# ... other parameters
)

Key Methods:
- validate(): Run all validators and return an aggregated result
- add_validator(): Register a new validator
- remove_validator(): Remove a validator
Base class for all validators.
from stillme_core.validation import Validator, ValidationResult
class MyValidator(Validator):
def validate(self, question: str, answer: str, **kwargs) -> ValidationResult:
# Validation logic
return ValidationResult(
passed=True,
confidence=0.9,
reasons=["Reason 1", "Reason 2"]
)

Available Validators:
- CitationRequired: Ensures citations are present
- CitationRelevance: Validates citation relevance
- EvidenceOverlap: Checks evidence overlap with the answer
- ConfidenceValidator: Validates confidence scores
- LanguageValidator: Validates language consistency
- IdentityCheckValidator: Checks StillMe identity
- EgoNeutralityValidator: Ensures ego-neutral responses
- SourceConsensusValidator: Validates source consensus
- PhilosophicalDepthValidator: Validates philosophical depth
- And 18+ more...
Main RAG retrieval interface.
from stillme_core.rag import RAGRetrieval
rag = RAGRetrieval()
context = rag.retrieve_context(
query="What is machine learning?",
knowledge_limit=5,
similarity_threshold=0.6
)

Key Methods:
- retrieve_context(): Retrieve context documents
- add_learning_content(): Add new content to the knowledge base
- retrieve_by_tier(): Retrieve from a specific knowledge tier
Vector database client.
from stillme_core.rag import ChromaClient
client = ChromaClient()
# Use client for direct database operations

Embedding generation service.
from stillme_core.rag import EmbeddingService
service = EmbeddingService()
embeddings = service.embed(["text 1", "text 2"])

Coordinates external data fetching.
from stillme_core.external_data import ExternalDataOrchestrator
orchestrator = ExternalDataOrchestrator()
result = await orchestrator.fetch_data(
intent=ExternalDataIntent.WEATHER,
query="weather in New York"
)

Key Methods:
- fetch_data(): Fetch data for a specific intent
- register_provider(): Register a new provider
Detect if query requires external data.
from stillme_core.external_data import detect_external_data_intent
intent = detect_external_data_intent("What's the weather today?")
# Returns: ExternalDataIntent.WEATHER

Base class for external data providers.
from stillme_core.external_data import ExternalDataProvider, ExternalDataResult
class MyProvider(ExternalDataProvider):
async def fetch(self, query: str) -> ExternalDataResult:
# Fetch logic
return ExternalDataResult(
success=True,
data={"key": "value"},
source="my_provider"
)

Automated learning scheduler.
from stillme_core.learning import LearningScheduler
from stillme_core.rag import RAGRetrieval
rag = RAGRetrieval()
scheduler = LearningScheduler(
rag_retrieval=rag,
interval_hours=4
)
await scheduler.start()
# Runs learning cycles every 4 hours

Key Methods:
- start(): Start the scheduler
- stop(): Stop the scheduler
- run_learning_cycle(): Run a single learning cycle
- get_status(): Get scheduler status
Abstract learning pipeline interface.
from stillme_core.learning import LearningPipeline, LearningResult
class MyPipeline(LearningPipeline):
def run_learning_cycle(self) -> LearningResult:
# Learning logic
return LearningResult(
cycle_number=1,
entries_fetched=10,
entries_added=8,
entries_filtered=2,
sources={"rss": 5, "arxiv": 5},
duration_seconds=30.0
)

Content filtering and prioritization.
from stillme_core.learning import ContentCurator
curator = ContentCurator()
filtered, rejected = curator.pre_filter_content(content_list)

Key Methods:
- pre_filter_content(): Filter content before embedding
- prioritize_content(): Prioritize content by importance
Abstract fetcher interface.
from stillme_core.learning import LearningFetcher
class MyFetcher(LearningFetcher):
def fetch(self, limit: Optional[int] = None) -> List[Dict[str, Any]]:
# Fetch logic
return [{"title": "...", "content": "...", ...}]
def get_source_name(self) -> str:
return "my_source"

Abstract post-processing interface.
from stillme_core.postprocessing import PostProcessor, PostProcessingResult
class MyProcessor(PostProcessor):
def process(self, text: str, context: Optional[Dict] = None) -> PostProcessingResult:
# Processing logic
return PostProcessingResult(
processed_text="...",
quality_score=0.9,
rewrite_attempted=False
)
def evaluate_quality(self, text: str) -> float:
return 0.9

Assess response quality.
from stillme_core.postprocessing import QualityEvaluator
evaluator = QualityEvaluator()
issues = evaluator.evaluate(
text="Response text...",
question="Original question...",
context_docs=[...]
)

Normalize response style.
from stillme_core.postprocessing import StyleSanitizer
sanitizer = StyleSanitizer()
sanitized = sanitizer.sanitize("Raw response text...")

Conditional LLM rewriting.
from stillme_core.postprocessing import RewriteLLM
rewriter = RewriteLLM()
result = await rewriter.rewrite(
text="Original text...",
original_question="Question...",
quality_issues=["issue1", "issue2"]
)

Smart skip logic for post-processing.
from stillme_core.postprocessing import PostProcessingOptimizer
optimizer = PostProcessingOptimizer()
should_process = optimizer.should_process(
question="Simple question?",
answer="Simple answer."
)

Centralized metrics collection.
from stillme_core.monitoring import get_metrics_collector, MetricCategory
metrics = get_metrics_collector()
# Record validation
metrics.record_validation(
passed=True,
reasons=["reason1"],
confidence_score=0.9
)
# Record RAG retrieval
metrics.record_rag_retrieval(
query="...",
num_results=5,
avg_similarity=0.8,
retrieval_time_ms=100.0
)
# Record learning cycle
metrics.record_learning_cycle(
cycle_number=1,
entries_fetched=10,
entries_added=8,
duration_seconds=30.0
)
# Get metrics
all_metrics = metrics.get_all_metrics()
validation_metrics = metrics.get_metrics(MetricCategory.VALIDATION)

Key Methods:
- record_validation(): Record validation metrics
- record_rag_retrieval(): Record RAG metrics
- record_learning_cycle(): Record learning metrics
- increment_counter(): Increment a counter
- set_gauge(): Set a gauge value
- record_histogram(): Record histogram values
- get_metrics(): Get metrics for a category
- get_all_metrics(): Get all metrics
Automated improvement engine.
from stillme_core.self_improvement import get_improvement_engine
engine = get_improvement_engine()
suggestions = engine.generate_improvements(days=7)

Key Methods:
- generate_improvements(): Generate improvement suggestions
- apply_improvements(): Apply improvements automatically
Feedback loop from validation to learning.
from stillme_core.self_improvement import get_feedback_loop
loop = get_feedback_loop()
loop.process_validation_results(validation_results)

Key Methods:
- process_validation_results(): Process validation results
- update_learning_priorities(): Update learning priorities
Pattern analysis and knowledge gap detection.
from stillme_core.self_improvement import get_self_improvement_analyzer
analyzer = get_self_improvement_analyzer()
patterns = analyzer.analyze_patterns(days=7)
gaps = analyzer.detect_knowledge_gaps()

Key Methods:
- analyze_patterns(): Analyze validation patterns
- detect_knowledge_gaps(): Detect knowledge gaps
- suggest_learning_content(): Suggest learning content
Base configuration class.
from stillme_core.config import BaseConfig
class MyConfig(BaseConfig):
def __init__(self):
super().__init__()
self.my_setting = self.get_env("MY_SETTING", default="default_value")

Validator-specific configuration.
from stillme_core.config import ValidatorConfig
config = ValidatorConfig()
threshold = config.get_threshold("confidence", default=0.8)

from stillme_core.validation import Validator, ValidationResult
class CustomValidator(Validator):
def validate(self, question: str, answer: str, **kwargs) -> ValidationResult:
# Your validation logic
passed = self._check_something(answer)
return ValidationResult(
passed=passed,
confidence=0.9 if passed else 0.3,
reasons=["Custom validation reason"]
)
# Register it
engine = ValidationEngine()
engine.add_validator(CustomValidator())

from stillme_core.learning import LearningFetcher
class CustomFetcher(LearningFetcher):
def fetch(self, limit: Optional[int] = None) -> List[Dict[str, Any]]:
# Fetch logic
return [
{
"title": "Title",
"summary": "Summary",
"source": "custom_source",
"link": "https://..."
}
]
def get_source_name(self) -> str:
return "custom_source"

from stillme_core.monitoring import get_metrics_collector, MetricCategory
metrics = get_metrics_collector()
# Record custom metric
metrics.increment_counter(
MetricCategory.VALIDATION,
"custom_metric",
metadata={"key": "value"}
)
# Get metrics
all_metrics = metrics.get_all_metrics()
print(all_metrics)

All components follow consistent error handling:
- Validation errors: Return ValidationResult with passed=False
- RAG errors: Return empty context or raise exceptions
- External data errors: Return ExternalDataResult with success=False
- Learning errors: Log errors and continue with the next cycle
- Post-processing errors: Return original text if processing fails
All APIs use Python type hints for better IDE support and type checking.
from typing import List, Dict, Any, Optional
from stillme_core.validation import ValidationResult
def my_function(text: str, count: int = 0) -> Optional[ValidationResult]:
# ...