"""Configuration management for LinkedIn Workflow."""

from typing import Optional
from pydantic_settings import BaseSettings, SettingsConfigDict
from pathlib import Path


class Settings(BaseSettings):
    """Application settings loaded from environment variables.

    Field names map case-insensitively to environment variable names;
    values may also come from a local ``.env`` file (see ``model_config``
    at the bottom of the class).
    """

    # API Keys (no defaults -- startup fails fast if any of these are unset)
    openai_api_key: str
    perplexity_api_key: str
    apify_api_key: str

    # Supabase (url/key required; service-role key optional)
    supabase_url: str
    supabase_key: str
    supabase_service_role_key: str = ""  # Required for admin operations like deleting users

    # Apify actor used for scraping LinkedIn profile posts
    apify_actor_id: str = "apimaestro~linkedin-profile-posts"

    # Web Interface (empty defaults -- presumably auth is skipped or
    # misconfigured when unset; verify against the web layer)
    web_password: str = ""
    session_secret: str = ""

    # Development
    debug: bool = False
    log_level: str = "INFO"

    # Email Settings (SMTP credentials; 587 is the standard submission port)
    smtp_host: str = ""
    smtp_port: int = 587
    smtp_user: str = ""
    smtp_password: str = ""
    smtp_from_name: str = "LinkedIn Post System"
    email_default_recipient: str = ""

    # Writer Features (feature flags -- can be toggled to disable new features)
    writer_multi_draft_enabled: bool = True  # Generate multiple drafts and select best
    writer_multi_draft_count: int = 3  # Number of drafts to generate (2-5); not validated here
    writer_semantic_matching_enabled: bool = True  # Use semantically similar example posts
    writer_learn_from_feedback: bool = True  # Learn from recurring critic feedback
    writer_feedback_history_count: int = 10  # Number of past posts to analyze for patterns

    # User Frontend (LinkedIn OAuth via Supabase)
    user_frontend_enabled: bool = True  # Enable user frontend with LinkedIn OAuth
    supabase_redirect_url: str = ""  # OAuth Callback URL (e.g., https://linkedin.onyva.dev/auth/callback)

    # LinkedIn API (Custom OAuth for auto-posting)
    linkedin_client_id: str = ""
    linkedin_client_secret: str = ""
    linkedin_redirect_uri: str = ""  # e.g., https://yourdomain.com/settings/linkedin/callback

    # Token Encryption (Fernet key for encrypting stored OAuth tokens)
    encryption_key: str = ""  # Generate with: python -c "from cryptography.fernet import Fernet; print(Fernet.generate_key().decode())"

    # Load overrides from .env; env var names are matched case-insensitively.
    model_config = SettingsConfigDict(
        env_file=".env",
        env_file_encoding="utf-8",
        case_sensitive=False
    )
# Global settings instance
settings = Settings()

# API pricing per 1M tokens (input, output)
API_PRICING = {
    "gpt-4o": {"input": 2.50, "output": 10.00},
    "gpt-4o-mini": {"input": 0.15, "output": 0.60},
    "sonar": {"input": 1.00, "output": 1.00},
}


def estimate_cost(model: str, prompt_tokens: int, completion_tokens: int) -> float:
    """Estimate cost in USD for an API call.

    Looks the model up in ``API_PRICING``; unknown models fall back to a
    flat $1.00 per 1M tokens for both input and output.
    """
    rates = API_PRICING.get(model, {"input": 1.00, "output": 1.00})
    per_million = 1_000_000
    return (
        prompt_tokens / per_million * rates["input"]
        + completion_tokens / per_million * rates["output"]
    )