From b50594dbfa35a3e2cfeb6cb8cd74f1b8e5189001 Mon Sep 17 00:00:00 2001 From: Ruben Fischer Date: Tue, 3 Feb 2026 12:48:43 +0100 Subject: [PATCH] aktueller stand --- .dockerignore | 51 + .env.example | 83 ++ .gitignore | 57 + DEPLOYMENT.md | 388 +++++ Dockerfile | 39 + config/supabase_schema.sql | 224 +++ data | 130 ++ docker-compose.ssl.yml | 67 + docker-compose.yml | 37 + logo.png | Bin 0 -> 53598 bytes main.py | 50 + maintenance_cleanup_reposts.py | 146 ++ maintenance_extract_topics.py | 93 ++ maintenance_fix_markdown_bold.py | 181 +++ post_output.json | 466 ++++++ requirements.txt | 26 + run_web.py | 16 + src/__init__.py | 2 + src/agents/__init__.py | 20 + src/agents/base.py | 120 ++ src/agents/critic.py | 276 ++++ src/agents/post_classifier.py | 279 ++++ src/agents/post_type_analyzer.py | 335 +++++ src/agents/profile_analyzer.py | 300 ++++ src/agents/researcher.py | 630 +++++++++ src/agents/topic_extractor.py | 129 ++ src/agents/writer.py | 764 ++++++++++ src/config.py | 57 + src/database/__init__.py | 25 + src/database/client.py | 533 +++++++ src/database/models.py | 126 ++ src/email_service.py | 144 ++ src/orchestrator.py | 743 ++++++++++ src/scraper/__init__.py | 4 + src/scraper/apify_scraper.py | 168 +++ src/tui/__init__.py | 4 + src/tui/app.py | 912 ++++++++++++ src/web/__init__.py | 1 + src/web/admin/__init__.py | 4 + src/web/admin/auth.py | 32 + src/web/admin/routes.py | 693 +++++++++ src/web/app.py | 39 + src/web/static/logo.png | Bin 0 -> 53598 bytes src/web/templates/admin/base.html | 105 ++ src/web/templates/admin/create_post.html | 539 +++++++ src/web/templates/admin/dashboard.html | 97 ++ src/web/templates/admin/login.html | 72 + src/web/templates/admin/new_customer.html | 274 ++++ src/web/templates/admin/post_detail.html | 1481 ++++++++++++++++++++ src/web/templates/admin/posts.html | 152 ++ src/web/templates/admin/research.html | 215 +++ src/web/templates/admin/scraped_posts.html | 571 ++++++++ src/web/templates/admin/status.html | 159 +++ src/web/templates/base.html | 103 ++ src/web/templates/create_post.html | 539 +++++++ src/web/templates/dashboard.html | 97 ++ src/web/templates/login.html | 72 + src/web/templates/new_customer.html | 274 ++++ src/web/templates/post_detail.html | 1481 ++++++++++++++++++++ src/web/templates/posts.html | 152 ++ src/web/templates/research.html | 215 +++ src/web/templates/scraped_posts.html | 571 ++++++++ src/web/templates/status.html | 155 ++ src/web/templates/user/auth_callback.html | 45 + src/web/templates/user/base.html | 113 ++ src/web/templates/user/create_post.html | 479 +++++++ src/web/templates/user/dashboard.html | 76 + src/web/templates/user/login.html | 75 + src/web/templates/user/not_authorized.html | 40 + src/web/templates/user/post_detail.html | 698 +++++++++ src/web/templates/user/posts.html | 114 ++ src/web/templates/user/research.html | 185 +++ src/web/templates/user/status.html | 142 ++ src/web/user/__init__.py | 4 + src/web/user/auth.py | 348 +++++ src/web/user/routes.py | 464 ++++++ workflow_now.json | 638 +++++++++ 77 files changed, 19139 insertions(+) create mode 100644 .dockerignore create mode 100644 .env.example create mode 100644 .gitignore create mode 100644 DEPLOYMENT.md create mode 100644 Dockerfile create mode 100644 config/supabase_schema.sql create mode 100644 data create mode 100644 docker-compose.ssl.yml create mode 100644 docker-compose.yml create mode 100644 logo.png create mode 100644 main.py create mode 100644 maintenance_cleanup_reposts.py create mode 100644 maintenance_extract_topics.py create mode 
100644 maintenance_fix_markdown_bold.py create mode 100644 post_output.json create mode 100644 requirements.txt create mode 100644 run_web.py create mode 100644 src/__init__.py create mode 100644 src/agents/__init__.py create mode 100644 src/agents/base.py create mode 100644 src/agents/critic.py create mode 100644 src/agents/post_classifier.py create mode 100644 src/agents/post_type_analyzer.py create mode 100644 src/agents/profile_analyzer.py create mode 100644 src/agents/researcher.py create mode 100644 src/agents/topic_extractor.py create mode 100644 src/agents/writer.py create mode 100644 src/config.py create mode 100644 src/database/__init__.py create mode 100644 src/database/client.py create mode 100644 src/database/models.py create mode 100644 src/email_service.py create mode 100644 src/orchestrator.py create mode 100644 src/scraper/__init__.py create mode 100644 src/scraper/apify_scraper.py create mode 100644 src/tui/__init__.py create mode 100644 src/tui/app.py create mode 100644 src/web/__init__.py create mode 100644 src/web/admin/__init__.py create mode 100644 src/web/admin/auth.py create mode 100644 src/web/admin/routes.py create mode 100644 src/web/app.py create mode 100644 src/web/static/logo.png create mode 100644 src/web/templates/admin/base.html create mode 100644 src/web/templates/admin/create_post.html create mode 100644 src/web/templates/admin/dashboard.html create mode 100644 src/web/templates/admin/login.html create mode 100644 src/web/templates/admin/new_customer.html create mode 100644 src/web/templates/admin/post_detail.html create mode 100644 src/web/templates/admin/posts.html create mode 100644 src/web/templates/admin/research.html create mode 100644 src/web/templates/admin/scraped_posts.html create mode 100644 src/web/templates/admin/status.html create mode 100644 src/web/templates/base.html create mode 100644 src/web/templates/create_post.html create mode 100644 src/web/templates/dashboard.html create mode 100644 src/web/templates/login.html create mode 100644 src/web/templates/new_customer.html create mode 100644 src/web/templates/post_detail.html create mode 100644 src/web/templates/posts.html create mode 100644 src/web/templates/research.html create mode 100644 src/web/templates/scraped_posts.html create mode 100644 src/web/templates/status.html create mode 100644 src/web/templates/user/auth_callback.html create mode 100644 src/web/templates/user/base.html create mode 100644 src/web/templates/user/create_post.html create mode 100644 src/web/templates/user/dashboard.html create mode 100644 src/web/templates/user/login.html create mode 100644 src/web/templates/user/not_authorized.html create mode 100644 src/web/templates/user/post_detail.html create mode 100644 src/web/templates/user/posts.html create mode 100644 src/web/templates/user/research.html create mode 100644 src/web/templates/user/status.html create mode 100644 src/web/user/__init__.py create mode 100644 src/web/user/auth.py create mode 100644 src/web/user/routes.py create mode 100644 workflow_now.json diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..1e39417 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,51 @@ +# Git +.git +.gitignore + +# Environment files (secrets!) 
+.env +.env.local +.env.*.local + +# Python +__pycache__ +*.py[cod] +*$py.class +*.so +.Python +.venv +venv/ +ENV/ + +# IDE +.idea/ +.vscode/ +*.swp +*.swo + +# Logs +*.log +logs/ + +# Test +.pytest_cache/ +.coverage +htmlcov/ + +# Build +build/ +dist/ +*.egg-info/ + +# Docker +Dockerfile +docker-compose*.yml +.dockerignore + +# Documentation +*.md +!requirements.txt + +# OS +.DS_Store +Thumbs.db diff --git a/.env.example b/.env.example new file mode 100644 index 0000000..2bc737c --- /dev/null +++ b/.env.example @@ -0,0 +1,83 @@ +# =========================================== +# LinkedIn Post Creation System - Environment +# =========================================== + +# Web Interface Password (leave empty to disable auth) +WEB_PASSWORD=your-secure-password-here +SESSION_SECRET=optional-random-string-for-session-security + +# =========================================== +# API Keys +# =========================================== + +# OpenAI API Key (required for post generation) +OPENAI_API_KEY=sk-your-openai-key + +# Perplexity API Key (required for research) +PERPLEXITY_API_KEY=pplx-your-perplexity-key + +# Apify API Key (required for LinkedIn scraping) +APIFY_API_KEY=apify_api_your-apify-key + +# =========================================== +# Supabase Database +# =========================================== + +SUPABASE_URL=https://your-project.supabase.co +SUPABASE_KEY=your-supabase-anon-key + +# =========================================== +# Optional Settings +# =========================================== + +# LinkedIn Scraping (Apify Actor) +APIFY_ACTOR_ID=apimaestro~linkedin-profile-posts + +# Development +DEBUG=false +LOG_LEVEL=INFO + +# =========================================== +# Email Settings (for sending posts) +# =========================================== + +# SMTP Server Configuration +SMTP_HOST=smtp.gmail.com +SMTP_PORT=587 +SMTP_USER=your-email@gmail.com +SMTP_PASSWORD=your-app-password +SMTP_FROM_NAME=LinkedIn Post System + +# Default recipient email (can be overridden in UI) +EMAIL_DEFAULT_RECIPIENT= + +# =========================================== +# Writer Features (Advanced) +# =========================================== + +# Multi-Draft: Generate multiple drafts and select the best one +# Uses ~2x more API tokens but often produces better results on first try +WRITER_MULTI_DRAFT_ENABLED=true +WRITER_MULTI_DRAFT_COUNT=3 + +# Semantic Matching: Select example posts based on topic similarity +# instead of random selection (no extra API cost) +WRITER_SEMANTIC_MATCHING_ENABLED=true + +# Learn from Feedback: Analyze recurring critic feedback from past posts +# and include lessons learned in the writer prompt (no extra API cost) +WRITER_LEARN_FROM_FEEDBACK=true +WRITER_FEEDBACK_HISTORY_COUNT=10 + +# =========================================== +# User Frontend (LinkedIn OAuth) +# =========================================== + +# Enable user frontend with LinkedIn OAuth login +# When enabled, / shows user login, /admin/* shows admin panel +USER_FRONTEND_ENABLED=true + +# OAuth callback URL (must match Supabase settings) +# Local: http://localhost:8000/auth/callback +# Production: https://your-domain.com/auth/callback +SUPABASE_REDIRECT_URL=http://localhost:8000/auth/callback diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..6798c23 --- /dev/null +++ b/.gitignore @@ -0,0 +1,57 @@ +# Environment +.env +.venv/ +venv/ +ENV/ +env/ + +# Python +__pycache__/ +*.py[cod] +*$py.class +*.so +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ 
+lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg + +# IDEs +.vscode/ +.idea/ +*.swp +*.swo +*~ + +# Logs +logs/ +*.log + +# OS +.DS_Store +Thumbs.db + +# Database +*.db +*.sqlite +*.sqlite3 + +# Temporary files +*.tmp +*.bak +*.cache + +# Data files (optional - depends on your needs) +data/secrets.json +data/credentials.json diff --git a/DEPLOYMENT.md b/DEPLOYMENT.md new file mode 100644 index 0000000..fd04982 --- /dev/null +++ b/DEPLOYMENT.md @@ -0,0 +1,388 @@ +# Deployment Guide - LinkedIn Post Creation System + +Diese Anleitung erklärt, wie du die LinkedIn Post App auf deinem Server mit Docker deployen kannst. + +## Voraussetzungen + +- Ein Server (VPS/Cloud) mit: + - Ubuntu 20.04+ oder Debian 11+ + - Mindestens 1GB RAM + - Docker & Docker Compose installiert +- Domain (optional, für HTTPS) +- API Keys: + - OpenAI API Key + - Perplexity API Key + - Apify API Key + - Supabase URL & Key + +--- + +## Schritt 1: Server vorbereiten + +### Docker installieren (falls nicht vorhanden) + +```bash +# System aktualisieren +sudo apt update && sudo apt upgrade -y + +# Docker installieren +curl -fsSL https://get.docker.com -o get-docker.sh +sudo sh get-docker.sh + +# Docker Compose installieren +sudo apt install docker-compose-plugin -y + +# Aktuellen User zur Docker-Gruppe hinzufügen +sudo usermod -aG docker $USER + +# Neu einloggen oder: +newgrp docker + +# Testen +docker --version +docker compose version +``` + +--- + +## Schritt 2: Projekt auf den Server laden + +### Option A: Mit Git (empfohlen) + +```bash +# Repository klonen +git clone https://github.com/dein-username/LinkedInWorkflow.git +cd LinkedInWorkflow +``` + +### Option B: Mit SCP (von deinem lokalen Rechner) + +```bash +# Auf deinem lokalen Rechner: +scp -r /pfad/zu/LinkedInWorkflow user@dein-server:/home/user/ +``` + +### Option C: Mit rsync + +```bash +# Auf deinem lokalen Rechner: +rsync -avz --exclude '.env' --exclude '__pycache__' --exclude '.git' \ + /pfad/zu/LinkedInWorkflow/ user@dein-server:/home/user/LinkedInWorkflow/ +``` + +--- + +## Schritt 3: Umgebungsvariablen konfigurieren + +```bash +cd LinkedInWorkflow + +# .env Datei aus Vorlage erstellen +cp .env.example .env + +# .env bearbeiten +nano .env +``` + +### Wichtige Einstellungen in der `.env`: + +```env +# Web-Passwort (UNBEDINGT ÄNDERN!) +WEB_PASSWORD=dein-sicheres-passwort-hier + +# API Keys (deine echten Keys eintragen) +OPENAI_API_KEY=sk-... +PERPLEXITY_API_KEY=pplx-... +APIFY_API_KEY=apify_api_... + +# Supabase +SUPABASE_URL=https://dein-projekt.supabase.co +SUPABASE_KEY=dein-supabase-key + +# Production Settings +DEBUG=false +LOG_LEVEL=INFO +``` + +**Wichtig:** Die `.env` Datei sollte NIEMALS committed werden! + +--- + +## Schritt 4: Docker Container starten + +```bash +# Im Projektverzeichnis: +cd LinkedInWorkflow + +# Container bauen und starten +docker compose up -d --build + +# Logs ansehen +docker compose logs -f + +# Status prüfen +docker compose ps +``` + +Die App ist jetzt unter `http://dein-server:8000` erreichbar. + +--- + +## Schritt 5: Firewall konfigurieren (optional aber empfohlen) + +```bash +# UFW installieren (falls nicht vorhanden) +sudo apt install ufw -y + +# SSH erlauben (WICHTIG - sonst sperrst du dich aus!) 
+sudo ufw allow ssh + +# Port 8000 erlauben +sudo ufw allow 8000 + +# Firewall aktivieren +sudo ufw enable + +# Status prüfen +sudo ufw status +``` + +--- + +## Schritt 6: Reverse Proxy mit Nginx & SSL (empfohlen für Production) + +### Nginx installieren + +```bash +sudo apt install nginx -y +``` + +### Nginx Konfiguration erstellen + +```bash +sudo nano /etc/nginx/sites-available/linkedin-posts +``` + +Inhalt: + +```nginx +server { + listen 80; + server_name deine-domain.de; + + location / { + proxy_pass http://127.0.0.1:8000; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection 'upgrade'; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_cache_bypass $http_upgrade; + proxy_read_timeout 86400; + } +} +``` + +### Nginx aktivieren + +```bash +# Symlink erstellen +sudo ln -s /etc/nginx/sites-available/linkedin-posts /etc/nginx/sites-enabled/ + +# Default-Site entfernen +sudo rm /etc/nginx/sites-enabled/default + +# Konfiguration testen +sudo nginx -t + +# Nginx neu starten +sudo systemctl restart nginx +``` + +### SSL mit Let's Encrypt (kostenlos) + +```bash +# Certbot installieren +sudo apt install certbot python3-certbot-nginx -y + +# SSL-Zertifikat beantragen +sudo certbot --nginx -d deine-domain.de + +# Auto-Renewal testen +sudo certbot renew --dry-run +``` + +--- + +## Nützliche Befehle + +### Container Management + +```bash +# Container stoppen +docker compose down + +# Container neu starten +docker compose restart + +# Container neu bauen (nach Code-Änderungen) +docker compose up -d --build + +# In Container einloggen +docker compose exec linkedin-posts bash + +# Logs ansehen (live) +docker compose logs -f + +# Logs der letzten 100 Zeilen +docker compose logs --tail=100 +``` + +### Updates einspielen + +```bash +# Code aktualisieren (mit Git) +git pull + +# Container neu bauen +docker compose up -d --build +``` + +### Backup + +```bash +# .env sichern (enthält alle Secrets!) +cp .env .env.backup + +# Alle Daten sind in Supabase - kein lokales Backup nötig +``` + +--- + +## Troubleshooting + +### Container startet nicht + +```bash +# Logs ansehen +docker compose logs linkedin-posts + +# Container-Status prüfen +docker compose ps -a + +# Neustart erzwingen +docker compose down && docker compose up -d --build +``` + +### Port bereits belegt + +```bash +# Prüfen was auf Port 8000 läuft +sudo lsof -i :8000 + +# Prozess beenden +sudo kill -9 +``` + +### Keine Verbindung zu Supabase + +1. Prüfe ob SUPABASE_URL und SUPABASE_KEY korrekt sind +2. Prüfe ob der Server ausgehende Verbindungen erlaubt +3. Teste: `curl -I https://dein-projekt.supabase.co` + +### Passwort vergessen + +```bash +# .env bearbeiten +nano .env + +# WEB_PASSWORD ändern + +# Container neu starten +docker compose restart +``` + +--- + +## Sicherheitsempfehlungen + +1. **Starkes Passwort verwenden** - Mindestens 16 Zeichen, Sonderzeichen +2. **HTTPS aktivieren** - Mit Nginx + Let's Encrypt (siehe Schritt 6) +3. **Firewall konfigurieren** - Nur nötige Ports öffnen +4. **Server aktuell halten** - `sudo apt update && sudo apt upgrade` +5. **Docker aktuell halten** - `sudo apt upgrade docker-ce` +6. 
**Keine API Keys committen** - .env in .gitignore + +--- + +## Monitoring (optional) + +### Einfaches Health-Check Script + +```bash +# health-check.sh erstellen +cat > health-check.sh << 'EOF' +#!/bin/bash +if curl -s -o /dev/null -w "%{http_code}" http://localhost:8000/login | grep -q "200"; then + echo "$(date): OK" +else + echo "$(date): FEHLER - Neustart..." + docker compose restart +fi +EOF + +chmod +x health-check.sh + +# Als Cron-Job (alle 5 Minuten) +(crontab -l 2>/dev/null; echo "*/5 * * * * /home/user/LinkedInWorkflow/health-check.sh >> /var/log/linkedin-health.log 2>&1") | crontab - +``` + +--- + +## Architektur + +``` +┌─────────────────────────────────────────────────────────┐ +│ Internet │ +└─────────────────────────┬───────────────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────────┐ +│ Nginx (Port 80/443) │ +│ - SSL Termination │ +│ - Reverse Proxy │ +└─────────────────────────┬───────────────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────────┐ +│ Docker Container (Port 8000) │ +│ │ +│ ┌────────────────────────────────────────────────────┐ │ +│ │ FastAPI Application │ │ +│ │ │ │ +│ │ ┌─────────────┐ ┌─────────────┐ ┌────────────┐ │ │ +│ │ │ Web UI │ │ API │ │ Agents │ │ │ +│ │ │ (Jinja2) │ │ Endpoints │ │ (AI Logic) │ │ │ +│ │ └─────────────┘ └─────────────┘ └────────────┘ │ │ +│ └────────────────────────────────────────────────────┘ │ +└─────────────────────────┬───────────────────────────────┘ + │ + ┌───────────────┼───────────────┐ + │ │ │ + ▼ ▼ ▼ + ┌──────────┐ ┌──────────┐ ┌──────────┐ + │ Supabase │ │ OpenAI │ │ Perplexity│ + │ DB │ │ API │ │ API │ + └──────────┘ └──────────┘ └──────────┘ +``` + +--- + +## Support + +Bei Problemen: +1. Logs prüfen: `docker compose logs -f` +2. GitHub Issues öffnen +3. Container neu bauen: `docker compose up -d --build` diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..3049370 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,39 @@ +# LinkedIn Post Creation System - Docker Image +FROM python:3.11-slim + +# Set environment variables +ENV PYTHONDONTWRITEBYTECODE=1 +ENV PYTHONUNBUFFERED=1 +ENV PYTHONPATH=/app + +# Set work directory +WORKDIR /app + +# Install system dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + gcc \ + && rm -rf /var/lib/apt/lists/* + +# Copy requirements first for better caching +COPY requirements.txt . + +# Install Python dependencies +RUN pip install --no-cache-dir -r requirements.txt + +# Copy application code +COPY . . 
+ +# Create non-root user for security +RUN useradd --create-home --shell /bin/bash appuser && \ + chown -R appuser:appuser /app +USER appuser + +# Expose port +EXPOSE 8000 + +# Health check +HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \ + CMD python -c "import httpx; httpx.get('http://localhost:8000/login', timeout=5)" || exit 1 + +# Run the application +CMD ["python", "-m", "uvicorn", "src.web.app:app", "--host", "0.0.0.0", "--port", "8000"] diff --git a/config/supabase_schema.sql b/config/supabase_schema.sql new file mode 100644 index 0000000..407ead3 --- /dev/null +++ b/config/supabase_schema.sql @@ -0,0 +1,224 @@ +-- LinkedIn Workflow Database Schema for Supabase + +-- Enable UUID extension +CREATE EXTENSION IF NOT EXISTS "uuid-ossp"; + +-- Customers/Clients Table +CREATE TABLE IF NOT EXISTS customers ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(), + updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(), + + -- Basic Info + name TEXT NOT NULL, + email TEXT, + company_name TEXT, + + -- LinkedIn Profile + linkedin_url TEXT NOT NULL UNIQUE, + + -- Metadata + metadata JSONB DEFAULT '{}'::JSONB +); + +-- LinkedIn Profiles Table (scraped data) +CREATE TABLE IF NOT EXISTS linkedin_profiles ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + customer_id UUID NOT NULL REFERENCES customers(id) ON DELETE CASCADE, + scraped_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(), + + -- Profile Data + profile_data JSONB NOT NULL, + + -- Extracted Information + name TEXT, + headline TEXT, + summary TEXT, + location TEXT, + industry TEXT, + + UNIQUE(customer_id) +); + +-- LinkedIn Posts Table (scraped posts) +CREATE TABLE IF NOT EXISTS linkedin_posts ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + customer_id UUID NOT NULL REFERENCES customers(id) ON DELETE CASCADE, + scraped_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(), + + -- Post Data + post_url TEXT, + post_text TEXT NOT NULL, + post_date TIMESTAMP WITH TIME ZONE, + likes INTEGER DEFAULT 0, + comments INTEGER DEFAULT 0, + shares INTEGER DEFAULT 0, + + -- Raw Data + raw_data JSONB, + + UNIQUE(customer_id, post_url) +); + +-- Topics Table (extracted from posts) +CREATE TABLE IF NOT EXISTS topics ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + customer_id UUID NOT NULL REFERENCES customers(id) ON DELETE CASCADE, + created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(), + + -- Topic Info + title TEXT NOT NULL, + description TEXT, + category TEXT, + + -- AI Extraction + extracted_from_post_id UUID REFERENCES linkedin_posts(id), + extraction_confidence FLOAT, + + -- Status + is_used BOOLEAN DEFAULT FALSE, + used_at TIMESTAMP WITH TIME ZONE +); + +-- Profile Analysis Table (AI-generated insights) +CREATE TABLE IF NOT EXISTS profile_analyses ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + customer_id UUID NOT NULL REFERENCES customers(id) ON DELETE CASCADE, + created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(), + + -- Analysis Results + writing_style JSONB NOT NULL, + tone_analysis JSONB NOT NULL, + topic_patterns JSONB NOT NULL, + audience_insights JSONB NOT NULL, + + -- Full Analysis + full_analysis JSONB NOT NULL, + + UNIQUE(customer_id) +); + +-- Research Results Table +CREATE TABLE IF NOT EXISTS research_results ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + customer_id UUID NOT NULL REFERENCES customers(id) ON DELETE CASCADE, + created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(), + + -- Research Data + query TEXT NOT NULL, + results JSONB NOT NULL, + + -- Topic
Suggestions + suggested_topics JSONB NOT NULL, + + -- Metadata + source TEXT DEFAULT 'perplexity' +); + +-- Generated Posts Table +CREATE TABLE IF NOT EXISTS generated_posts ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + customer_id UUID NOT NULL REFERENCES customers(id) ON DELETE CASCADE, + created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(), + + -- Topic + topic_id UUID REFERENCES topics(id), + topic_title TEXT NOT NULL, + + -- Post Content + post_content TEXT NOT NULL, + + -- Generation Metadata + iterations INTEGER DEFAULT 0, + writer_versions JSONB DEFAULT '[]'::JSONB, + critic_feedback JSONB DEFAULT '[]'::JSONB, + + -- Status + status TEXT DEFAULT 'draft' CHECK (status IN ('draft', 'approved', 'published', 'rejected')), + approved_at TIMESTAMP WITH TIME ZONE, + published_at TIMESTAMP WITH TIME ZONE +); + +-- Create Indexes +CREATE INDEX idx_customers_linkedin_url ON customers(linkedin_url); +CREATE INDEX idx_linkedin_profiles_customer_id ON linkedin_profiles(customer_id); +CREATE INDEX idx_linkedin_posts_customer_id ON linkedin_posts(customer_id); +CREATE INDEX idx_topics_customer_id ON topics(customer_id); +CREATE INDEX idx_topics_is_used ON topics(is_used); +CREATE INDEX idx_profile_analyses_customer_id ON profile_analyses(customer_id); +CREATE INDEX idx_research_results_customer_id ON research_results(customer_id); +CREATE INDEX idx_generated_posts_customer_id ON generated_posts(customer_id); +CREATE INDEX idx_generated_posts_status ON generated_posts(status); + +-- Post Types Table (for categorizing posts by type) +CREATE TABLE IF NOT EXISTS post_types ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + customer_id UUID NOT NULL REFERENCES customers(id) ON DELETE CASCADE, + created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(), + updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(), + + -- Type Definition + name TEXT NOT NULL, + description TEXT, + identifying_hashtags TEXT[] DEFAULT '{}', + identifying_keywords TEXT[] DEFAULT '{}', + semantic_properties JSONB DEFAULT '{}'::JSONB, + + -- Analysis Results (generated after classification) + analysis JSONB, + analysis_generated_at TIMESTAMP WITH TIME ZONE, + analyzed_post_count INTEGER DEFAULT 0, + + -- Status + is_active BOOLEAN DEFAULT TRUE, + + UNIQUE(customer_id, name) +); + +-- Add post_type_id to linkedin_posts +ALTER TABLE linkedin_posts + ADD COLUMN IF NOT EXISTS post_type_id UUID REFERENCES post_types(id) ON DELETE SET NULL, + ADD COLUMN IF NOT EXISTS classification_method TEXT, + ADD COLUMN IF NOT EXISTS classification_confidence FLOAT; + +-- Add target_post_type_id to topics +ALTER TABLE topics + ADD COLUMN IF NOT EXISTS target_post_type_id UUID REFERENCES post_types(id) ON DELETE SET NULL; + +-- Add target_post_type_id to research_results +ALTER TABLE research_results + ADD COLUMN IF NOT EXISTS target_post_type_id UUID REFERENCES post_types(id) ON DELETE SET NULL; + +-- Add post_type_id to generated_posts +ALTER TABLE generated_posts + ADD COLUMN IF NOT EXISTS post_type_id UUID REFERENCES post_types(id) ON DELETE SET NULL; + +-- Create indexes for post_types +CREATE INDEX IF NOT EXISTS idx_post_types_customer_id ON post_types(customer_id); +CREATE INDEX IF NOT EXISTS idx_post_types_is_active ON post_types(is_active); +CREATE INDEX IF NOT EXISTS idx_linkedin_posts_post_type_id ON linkedin_posts(post_type_id); +CREATE INDEX IF NOT EXISTS idx_topics_target_post_type_id ON topics(target_post_type_id); +CREATE INDEX IF NOT EXISTS idx_research_results_target_post_type_id ON research_results(target_post_type_id); +CREATE INDEX 
IF NOT EXISTS idx_generated_posts_post_type_id ON generated_posts(post_type_id); + +-- Create updated_at trigger function +CREATE OR REPLACE FUNCTION update_updated_at_column() +RETURNS TRIGGER AS $$ +BEGIN + NEW.updated_at = NOW(); + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +-- Add trigger to customers table +CREATE TRIGGER update_customers_updated_at + BEFORE UPDATE ON customers + FOR EACH ROW + EXECUTE FUNCTION update_updated_at_column(); + +-- Add trigger to post_types table +DROP TRIGGER IF EXISTS update_post_types_updated_at ON post_types; +CREATE TRIGGER update_post_types_updated_at + BEFORE UPDATE ON post_types + FOR EACH ROW + EXECUTE FUNCTION update_updated_at_column(); diff --git a/data b/data new file mode 100644 index 0000000..3295d93 --- /dev/null +++ b/data @@ -0,0 +1,130 @@ +return { + "company_name": "MAKE IT MATTER", + "persona": +Christina Hildebrandt, Co-Founder MAKE IT MATTER I Kommunikationsberatung, Interimsmanagement, PR & LinkedIn-Kompetenz für KMUs und Agenturen, die wachsen wollen. 25 Jahre Erfahrung in Konzern, Mittelstand und Agenturen.", + + // Die Ansprache-Logik für den Critic-Node + "form_of_address": +Duzen (Du/Euch) - direkt, leidenschaftlich und hochemotional, + + "style_guide": +Kommunikation als Sales-Infrastruktur. Fokus auf ROI ('Alles was nicht einzahlt, muss weg'). Stil: Laut, ehrlich, begeisterungsfähig ('Halleluja!', 'Galopp!'). Nutzt Storytelling durch Flashbacks und persönliche Anekdoten, um strategische Punkte zu belegen.", + + "topic_history": [ +"KI-Suche als Sales-Infrastruktur","Kommunikation treibt Sales über KI","Positionierung für den Mittelstand (leise/komplexe Unternehmen)","LinkedIn als Teil der klassischen Unternehmenskommunikation","Die 'Zweite Pubertät' 2026: Neuanfang und Mut" + ], + + "example_posts": [ + `𝗞𝗜-𝗦𝘂𝗰𝗵𝗲 𝗶𝘀𝘁 𝗱𝗲𝗿 𝗲𝗿𝘀𝘁𝗲 𝗦𝗰𝗵𝗿𝗶𝘁𝘁 𝗶𝗺 𝗦𝗮𝗹𝗲𝘀 𝗙𝘂𝗻𝗻𝗲𝗹. Gute Kommunikation ist Sales-Infrastruktur. Das ist unsere Chance Christina!!! ❞ + +Das sagte Max kurz vor Weihnachten in einem Café zu mir und ich schrie ihn fast an: +"Max!! Sag das nochmal!!! Das ist genial!!! Das ist unser nächster Post!" + +Und… +DA IST ER:)!🚀 + +Warum ich so begeistert war? +Ich glaube, dass Kommunikation unbedingt auch Sales „driven“ muss. +Make It Matter ist entstanden, weil es nur wenige Agenturen gibt, die so radikal+konsequent Kommunikation als Sales- und Leads-Wachstumstreiber mitdenken wie wir. +ALLES was nicht auf die Unternehnensziele einzahlt muss radikal WEG. +Lisa und ich haben EIN Ziel: Die New Business- bzw. Vertriebsmannschaft soll uns lieben:) + +KI spielt uns in die Karten und ist wie ein Brennglas: +Die KI entscheidet, wer und überhaupt noch empfohlen wird und wer auf Shortlists landet. + +JETZT muss man kommunikative KI-Signale setzen und das „Dach decken“. JETZT kann man KI-Pflöcke im Netz einziehen, damit man im Sales-Entscheidungsprozess VORNE sichtbar wird. +𝗞𝗼𝗺𝗺𝘂𝗻𝗶𝗸𝗮𝘁𝗶𝗼𝗻 𝘁𝗿𝗲𝗶𝗯𝘁 𝗦𝗮𝗹𝗲𝘀 ü𝗯𝗲𝗿 𝗞𝗜. Jetzt und künftig noch mehr! + +Im Gespräch mit Max wurde das sehr klar: KI recherchiert nicht mehr wie früher, sie priorisiert und bewertet, lange bevor der Vertrieb überhaupt spricht. +Es gilt: +👉Wenn KI ein Unternehmen nicht eindeutig einordnen kann, ist es für Sales nicht im Rennen. +👉Genau hier wird Kommunikation zur Sales-Infrastruktur. + +Make It Matter Matter schafft dafür die strategische Grundlage aus Positionierung, Themenarchitektur und Public Relations als echte Third-Party-Validierung. +(Klingt gut oder?! Und ist wahr!!) + +Unsre Stärken? 
+👉LISA HIPP Hipp übersetzt diese Klarheit auf LinkedIn in wiederholbare Narrative, die Einordnung erzeugen. +👉Max Anzile sorgt dafür, dass diese Signale gezielt distribuiert, getestet und messbar gemacht werden. +👉 Und ich bin Kommunikation und Public Relations durch und durch (Konzern, Mittelstand, Agenturen. 25 Jahre + +So entsteht für undere kein Content-Feuerwerk, sondern ein SYSTEM, das SALES-VORAUSWAHL gewinnt. +💸💸💸 + +Wir sehen in der Praxis: +Deals mit mehreren Kommunikationskontakten +👉schliessen schneller und stabile +👉und brauchen weniger Rabatt +weil Vertrauen steigt und Vergleichbarkeit sinkt. + +Unsere Überzeugung ist klar: +Entweder Kommunikation ist messbarer Teil des Sales Funnels oder sie wird 2026 gestrichen. +Sorry to say!! +Good for us!!!!🙃 + +Christina +#Kommunikation #SalesSupport #SocialMedia #KISEO`, + + `Ein unstrategischer Flashback-Post aus dem Flixbus Richtung Bodensee. +#AboutEducatedAwareness #AboutMittelstand #AusDerHüfteGeschossen +Sorry Lisa🙈 +⸻ +Gestern habe ich brav den Keller aufgeräumt und bin an einer Kiste alter Job-Fotos hängengeblieben. +📸 +Ich in Amsterdam mit Tommy Hilfiger. Beim Abendessen mit Lovely Annette Weber, Flohmärkte mit Chefredakteuren, Bilder mit Steffi Graf, Sönke Wortmann, Natalia Wörner und Heike Makatsch in Nördlingen bei Strenesse. Cartier-Juste-Un-Clou-Launch in New York. +Halleluja war ich wichtig, dünn, lustig und jung :) +Was für eine goldene, sorglose Zeit*. +💫💫💫 +Bei diesem Foto👇 musste ich so lachen. Ich weiß noch, wie es entstanden ist: +Wir waren Sponsor beim Bambi. (Wir – damit meine ich Cartier damals.) +Mein Chef Tom Meggle gab mir damals die Erlaubnis, meine Freundinnen Bine Käfer (jetzt Lanz), Celia von Bismarck und Gioia von Thun mitzunehmen. +Wichtig wichtig. +Wir also aufgedresst wie Bolle, kommen an den roten Teppich, Blitzgewitter, 4 Girlies. Dann ein Rufen aus der Fotografenmenge: +„Christina!!! Kannst Du aus dem Bild gehen??? Wir brauchen die 3 Mädels alleine!!!“ (weil echt wichtig). +Ich weiß noch, wie ich fast zusammengebrochen bin vor Lachen. „Hey!! ICH habe DIE mitgenommen!!“ 🤣🤣🤣 +Ein Bild von uns 4 hab ich dann aber doch noch bekommen. Plus einen wundervollen Abend. +…. +Was das mit Make It Matter äund „Job“ zu tun hat? +Wenig. Aber ein bisschen schon: +Es zeigt aber, wie sehr ich mich verändert habe: +💫𝗛𝗲𝘂𝘁𝗲 𝗮𝗿𝗯𝗲𝗶𝘁𝗲 𝗶𝗰𝗵 𝗮𝗺 𝗹𝗶𝗲𝗯𝘀𝘁𝗲𝗻 𝗳𝘂̈𝗿 𝗨𝗻𝘁𝗲𝗿𝗻𝗲𝗵𝗺𝗲𝗻, 𝗱𝗶𝗲 𝗴𝗿𝗼ß𝗮𝗿𝘁𝗶𝗴 𝘀𝗶𝗻𝗱, 𝗮𝗯𝗲𝗿 𝘇𝘂 𝗹𝗲𝗶𝘀𝗲, 𝘇𝘂 𝗸𝗼𝗺𝗽𝗹𝗲𝘅 𝗼𝗱𝗲𝗿 𝘇𝘂 𝗯𝗲𝘀𝗰𝗵𝗲𝗶𝗱𝗲𝗻, 𝘂𝗺 𝘀𝗲𝗹𝗯𝘀𝘁 𝘂̈𝗯𝗲𝗿 𝗶𝗵𝗿𝗲 𝗦𝘁𝗮̈𝗿𝗸𝗲 𝘇𝘂 𝘀𝗽𝗿𝗲𝗰𝗵𝗲𝗻 +(Es gibt so so so tolle Firmen, von denen noch keiner etwas gehört hat, meistens mit unglaublich netten Teams!) +💫 Heute habe ich mich in erklärungsbedürftige, komplizierte Produkte, EducatedAwareness und #LinkedIn als Teil der klassischen #Unternehmenskommunikation verliebt. +💫 Heute liebe ich komplexe und „unsexy“ Aufgaben, die ich knacken will. +So verändert man sich. Ist das nicht verrückt? +Schön war’s trotzdem damals. Mensch, bin ich dankbar! +Euch einen schönen Sonntag. Eure Christina +www.make-it-matter.de +#MakeItMatter #Kommunikation #Wachstumstreiber #Mittelstand #PublicRelations +PS. Ich setzt mich jetzt gleich an das Papier liebe Lisa. Bis 11.15h gab ich fertig😉`, + + `𝗪𝗘𝗥 𝗠𝗔𝗖𝗛𝗧 𝗠𝗜𝗧??? Ich habe entschieden, dass 2026 meine zweite Pubertät beginnt #MakeItMatter🚀 +Halleluja! Endlich hat das neue Jahr begonnen. +Mit meinen Freunden habe ich gestern beschlossen, dass ich das kommende Jahr jetzt mal völlig neu angehen werde: +Ich stelle mir einfach vor, ich wäre in meiner zweiten Pubertät!!! 
+Ok, ok, dieses Mal mit besserem Wein, einem kleinen Kontopuffer, 25 Jahren Berufserfahrung, besserem WLAN, aber mit dem gleichen Gefühl von damals: +𝗗𝗘𝗥 𝗡𝗔̈𝗖𝗛𝗦𝗧𝗘 𝗟𝗘𝗕𝗘𝗡𝗦𝗔𝗕𝗦𝗖𝗛𝗡𝗜𝗧𝗧 𝗪𝗜𝗥𝗗 𝗗𝗘𝗥 𝗕𝗘𝗦𝗧𝗘!!! +Ich fühle es! Alles liegt vor mir und ich kann Pippi-Langstrumpf-mäßig einfach alles erreichen, à la: +„𝘐𝘤𝘩 𝘩𝘢𝘣 𝘥𝘢𝘴 𝘯𝘰𝘤𝘩 𝘯𝘪𝘦 𝘨𝘦𝘮𝘢𝘤𝘩𝘵 – 𝘥𝘢𝘴 𝘬𝘢𝘯𝘯 𝘪𝘤𝘩 𝘣𝘦𝘴𝘵𝘪𝗺𝘮𝘵.“ +(PS: Wann haben wir das eigentlich verlernt?) +Ich finde die Ähnlichkeit zu meinem (nicht mehr pubertierenden) Sohn Lenny wirklich erstaunlich: +Er ist genauso begeistert von der Idee, im Ausland zu studieren und sich etwas Eigenes, Großes aufzubauen, wie ich besessen von Lisas und meiner Make-It-Matter-Idee bin. +Jetzt sitzen wir auf dem Driverseat unseres Lebens und können Kommunikations-Burgen aufbauen: das, was wir am allerbesten können! +Wir mussten gestern bei dem Erste-und-Zweite-Pubertäts-Vergleich wirklich lachen: +Auch die Hormonprobleme sind ähnlich. Nur habe ich meine im Griff bzw. hinter mir. Lenny hat noch eine (aufregende) Reise vor sich. +Muss man da nicht automatisch grinsen? +Dieses Grinsen werde ich mir für 2026 vornehmen. Ich werde mich öfter an den Sternenzauber meiner ersten Pubertät erinnern, an die unaufhaltsame Kraft, die Fröhlichkeit, den Mut, die Neugierde und die Ausdauer. +Und wisst ihr, worauf ich mich am meisten freue? +Auf die Momente, in denen ich bei den Social Media Pirates - we are hiring – we are hiring – ins Büro komme und Max Anzile (der übrigens 20 Jahre jünger ist als ich) zu mir sagt: +„Guten Morgen, Boomer!“ …und frech grinst. Auf seine KI-Sessions, seine Ideen, neue Welten, Hummeln im Popo. +Und natürlich LISA HIPP, die allerallerallerbeste Geschäftspartnerin, die ich mir vorstellen kann. #ZamReinZamRaus +Seid ihr dabei? 💫 Zweite Pubertät ab 2026? 💪 Vollgas? ✔️ Lebensfreude? 🫶 Kommunikation neu denken? ✔️ Jahr des Feuerpferdes? 🐎 +Dann GALOPP!!!!! +Ich wünsche euch, dass all eure Träume und Wünsche in Erfüllung gehen und dass ihr den Mut habt, etwas dafür zu tun! +„Wenn die Sehnsucht größer ist als die Angst, wird Mut, Erfolg und Lebensfreude geboren.“ +Ist das nicht schön? +Happy New Year und happy neue Lebensphase(n) +wünscht Euch Christina +#MakeItMatter #Kommunikation #PublicRelations #LinkedInComms` + ] +}; \ No newline at end of file diff --git a/docker-compose.ssl.yml b/docker-compose.ssl.yml new file mode 100644 index 0000000..ad2db09 --- /dev/null +++ b/docker-compose.ssl.yml @@ -0,0 +1,67 @@ +version: '3.8' + +services: + linkedin-posts: + build: . 
+ container_name: linkedin-posts + restart: unless-stopped + expose: + - "8000" + env_file: + - .env + environment: + - PYTHONPATH=/app + - VIRTUAL_HOST=linkedin.onyva.dev + - VIRTUAL_PORT=8000 + - LETSENCRYPT_HOST=linkedin.onyva.dev + volumes: + - ./logs:/app/logs + healthcheck: + test: ["CMD", "python", "-c", "import httpx; httpx.get('http://localhost:8000/login', timeout=5)"] + interval: 30s + timeout: 10s + retries: 3 + start_period: 10s + networks: + - proxy-network + + nginx-proxy: + image: nginxproxy/nginx-proxy + container_name: nginx-proxy + restart: unless-stopped + ports: + - "80:80" + - "443:443" + environment: + - DEFAULT_HOST=linkedin.onyva.dev + volumes: + - /var/run/docker.sock:/tmp/docker.sock:ro + - certs:/etc/nginx/certs + - html:/usr/share/nginx/html + - vhost:/etc/nginx/vhost.d + networks: + - proxy-network + + acme-companion: + image: nginxproxy/acme-companion + container_name: acme-companion + restart: unless-stopped + volumes_from: + - nginx-proxy + volumes: + - /var/run/docker.sock:/var/run/docker.sock:ro + - acme:/etc/acme.sh + environment: + - DEFAULT_EMAIL=ruben.fischer@onyva.de + networks: + - proxy-network + +networks: + proxy-network: + driver: bridge + +volumes: + certs: + html: + vhost: + acme: diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..1591494 --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,37 @@ +version: '3.8' + +services: + linkedin-posts: + build: . + container_name: linkedin-posts + restart: unless-stopped + ports: + - "8000:8000" + env_file: + - .env + environment: + - PYTHONPATH=/app + volumes: + # Optional: Mount logs directory + - ./logs:/app/logs + healthcheck: + test: ["CMD", "python", "-c", "import httpx; httpx.get('http://localhost:8000/login', timeout=5)"] + interval: 30s + timeout: 10s + retries: 3 + start_period: 10s + + # Optional: Nginx reverse proxy with SSL + # Uncomment if you want to use Nginx + # nginx: + # image: nginx:alpine + # container_name: linkedin-posts-nginx + # restart: unless-stopped + # ports: + # - "80:80" + # - "443:443" + # volumes: + # - ./nginx.conf:/etc/nginx/nginx.conf:ro + # - ./ssl:/etc/nginx/ssl:ro + # depends_on: + # - linkedin-posts diff --git a/logo.png b/logo.png new file mode 100644 index 0000000000000000000000000000000000000000..581b3f1b68ea05fd09df50d84f3235575bb9412d GIT binary patch literal 53598 zcmeFYhf`DC_dQHUMZk{Ic~C(SP&%QD6hY~NAOsbVCN&9=gir-+6cqub_f7z*fdmi* z5l!e(2nk3@NFb3CA_U0G=l5s4^PRafcjnHSd+yn1-+k6zd+ir?HWtFi#E)@taS7kH zyyd{f#Y^Gh;*L5ZaQI}hO_)3TM<%sMTP6!uyZ+At|7U^!zgqwxJd0v#28VP)Xo0xz^n*3h0ctpyX_UVD zGL4fjTk8uj@7bG6RMkn_X(Ga|wNJ2i5BzfVzBWvFg}L6tg3!t7;mX5X`9DiO;jegf zxnQXut9l$)l|!Jz#faLr?GKcvOXPsiV4YazL30FhelKF?)#m*J>sita+P0v55@8fn z+hf$M#Dt}nP_PsA4_uzyubJDdc+d{)KyDkw&_SxYS87QJ=jAx41>t!mrwUf#WrzE^ z_EBa$b#Q$95#LTJOUu=uQf!jSsTuS}*+vH$WM)PMqFbl-))eZ)L z2^~lxD%{6iv~+y(GX(Rq8=FoSj0q+WY=t>VEWBI4lT{x-JsTM-r8SMbG>(}nOto>5 z1m%~GcGj@oy^*Q`iO_tj9|%fOS8Wjy*JhPyv4*O(?5^5La<6?|j7uvjeR7$$U5A65 zF(a!o=h||)wcn>lFJ8)tBa2DHS96_IesjQ^%i1AVX{b+)+A&l|k~4lScjw09d0H~I zvtw0uLOL2<5Oz(+Q{ho`c0Zu!USvFCgct5aW=_{f@cm=XlN*5&QdK>WrJK+y zY)Eqqu*pliL~rj%oFs-HuvcK!6SBWGL)zRhWjo(3yf7H{GsB*}Geu_!o&wJWR!O}c=ob6)UnCw+wBgB5|dxy-PTqyTa`8M%0I{11-lvpp4SywP=y4yib zeR3egN#$4}=AuHQbrc3~NO=iD((uzN?yj17Iu+9?ZVYW#gOJHR$@IG5^d=GnI*tnR z**B09^CY$yHSdzp@?}~&-6q9wtXWOzDpEXeLh3+ukgPz5&Iv+(+?USBl`q^+&rkb6 zU3wAc0WRWMLSbl-uC?w-kjq`Xu|f7AAbp##EWUx|GboJxBTCw*#_mbHL*$DKJPr%w(#;I!lA$qfbS7s%EFXrJ-GHuByk0!5-q!|Pz}wSXe_ zW-jdO<^x>lw(CG3DRy7*J*0DbEW&NMT^m7c|NVqnK(Ami3j>DO0GC!m%c?W$!N5+$ 
zPf+I23mlV70XT1j&e%Q4rv?SQ5r-FUt@vxiss|j0u!mm48RvX3;@er4WQZGe>4C=f zlF|XroJpq2^pi<@&!68nBPZ)NUYSLVN1xSDAKBi|-uJPVq@!=8J4jC3qM`O0v(=Uy zEv6m4XSYZSD9p5^b%Mvc?UPTDWCJ6sIl^+dW0)w;vezdpcBR0RqO*q3b5kx4JIF2?jV5I*N5E6N!DT)NpV8`4_{#MLbv+%VzQIj(L* zMpWLj&txTpSDF{JPetXf9&-xHH{1UB@=|q*FuAw$OdXI5Jxx$>?V9)*&u?pUeW{Tc zyK{8OCbyXNoizjt;f=p0N#FVhuS9$`FRXQ!a1K$GL@XYdY!*{bM?+9TsPP8ih|x`) ze5P$f)P>k*NSvdLaEjA2eo2ft9_Dkttf4eoS)5rL{h^KvLvbvC3&1gy^}^d0KjQOW ze0gW0(*xFC0fDZ{T)t#-9oOJ>C;G?h4c5%Le{<6I^6$KbS0g&-&e^#g zgJZ@qv8h!L3iuB^Qhmj9vS6`pcWbQVWl40}u3m4u$y#7Owgn0Jna=j(_ZSmpA-*V@uNwfot zN*mKOrL{dbva?f_C7D}SWE63eKD*p1Y2zG8rxIVH_w|iP6LOebhx|;#oM!?Qi=<7% zg(z{5_EjV0HTQt6U&DOwc@#FY)i$WVFeESD9)ls^17tl}4n?7^ibrTr+yqJq5l%uu zWS1Z}micgr{u6fLXA-^$FknCzb^T(Hr^t9_ptK# zkrUa~QcY4+8(i7`f?32GZV4&5on2J1#tw5F-VHc?d9dutMvHzqk(KimS+Vu%^Q-p{ zPxLF_8)6QXx)j@;m3K;rxD1}7*G`19~h4=!zqe{HK(cN?SegdkEm1o)yAAq zU)m8OC)@jl;p-0G?dX!TAlK1cn9Mp9PNj%+dIjKy%tUOlpB!@QyxCiv#NkAR^V;1I2Z>0Mzw(8~+ zOa~UWkystAzN0%5j40Xl{Nt1<#xPZvO z0A?juak^wzVI+$$0%6(99hFCoIQl|sG_irafl~=WK*$l~@sf&plQ7<{tD86xp^(~2 zn98`w^IOFjMJMk2`If8%e9GDW#o&ddF^f)kNsbG1;xll_9!k!mYd2wYCE~tR?z=U= zedDE1EJ{9oIK>q9d!6_4V*aZ!m76&F_vr}ntt>@_BXnn+-P2V=WpX3`p+Va!z0^@lI1zqVGB|PL7D*#oSN((1+?P^!XFlB3VlMT2R||aHab@|D z^N|5w#ph}*GyiVxY&TaZiLBq;zzZW~TF_0$WLo8ZTZi}GcEMR+`qLSoZ-)P#$A>q= zubWbt%QGD?#x`p&!voDmQY5CC`iyhK*Q2YIm;(^@A9(78@Ww|CGbR&8DGk?%^_ETw zI8BF2_vyPqgGuazqYI5pIYfAWR5irqdFs=MfFJiv@)OW4ud+#B#C^T3x@*mz{9e2q zrjRD3&X#=WT+Eps+4#cA)Rb)9pb=UnGH=XuQ#BU_GohAOc>L3!$CMZ#de zDHPvfe_O6>5lV+c66x(V9Mc7~95F{TbLuOx0`Ii{KxD{MWgePEIJ3-c?hJDwe61J^ z=6_~c)+ntM47v9-TPN@wDYr`V?u<~RqHFXTINl})p;b!?$ zdQ&~n?_%)mFKu^MQE8S+%^WEb9X;87%jo=8ZPeS>pM>EGCpM5rsXq>VSrq#USJcvm z6>r%*;aN6DDpMQ=Lf-@sy|lz1NPUe43LHoN4d4$#<*8K zvX~mc#ZHCuKsB$zFtD&&G1S%Xz<)LDbKT>!_`|f!3bBz~QxLJ{52u&U1^G-3gx?Cn zusl3)3$ySVSyA}(`3A>))9F=Xo>lLgqJ&#}!b7OEls2#Xm4(#Tyy3of)xF9vMQcQ2 zx_Wc=vl~G~BMSSGQUo-4LO#2uaYx4^Cs-FJO}Ruz9#B`K~$Pjcne5^ zGLLwr=z0TuwvXYH%>viP`J#clQsf#5TKB>#4@M6?K(K$(+P9wMnAZg$4rs>|bdgY( zXk9Q(W0|Y>Od6g9g8vVvk$hfT@6iD(&gY1Rr8-SyK<~u+x!1|HgpOHORADZbsTAOpYc{G^E zdn?!7&=zK!&O%%H{4&w7S-(JXh-#hd`}u}#Tk$$9`!nhJt6w8?Z=RE8;(M)yQ{^$v zd8lOQGBjD8xI43a{0$IdEOPaB`wQ91D9Y!ycemQ3&mvGk<;z?7{0k}NaZMM527FFw^ARr*o2+kpCT|n1R zK&<2#n|CpY#jC>P{vjBK>M;t0ZB^UA`;5dGGz9&F_D}lialhv$xFHwK7Y$#l8L71^ zhbXrB1QnWswBzv7+>`0 zQ?J7dt8i8B4}?iQ52xW{9CdTG35S&=E=yrt-a!=s^hO5XIir5@p={jDmRwbkr~6K> zIL0=h;)!6Xc4rdHa}x3W*5Gn2{w3^SfhIgT}G4Ck4&C zE4C)G_97WNr4w?7>YED)!Q16AN~g+$RMeqAhp(uNRVhgXomCmhr%AzItU9HlB7*D8 z_V-qJR<3?_&c1=ZfVhq-nD(WSc-%yPNQ+regx9(is#Gil40GMto3{))Of5XPm%R`( z+L7wE~QS^ow2`w9-xLYL_!&EIok*#1tg!XPz>A5 z`ZSib^*R2%m-QH=L0mTi7dro4vlm2mr5lLp zX1CTqAQ}`-q-1wLUoGuQPZ|)BjNVIv-r-sM>bh-a0yDNUb79kdxVCGID7H-$=nl>3YZe+fAwRm^1!yIJeg z;2Fiex|4om#%S5-+YPd6((kTAZ=sQF1X2i5G4($34a{g|$79S}beV;}2eiJ>-P3f??mR zqP$@1y(C#VA#1$qjpF*kus4&pKydN2GF~1_r;nli6UV;l3||@9RuI=qbU*G@7^}WX zYhN=9zX17WvS#kGbIEl8yrR5YCJEO9y|sSF=s1#ulMXl4R8_CFPQJvwMB8iMHY*E1 zUA_K<*R4}nA`aSnT6e9vh>ogqhWnuAnv00N&cYob5Afe`o98QtN3MHB?$FNLiz?wt zuaxbSL6?ca%U+5XZaQAqws~#q*b|dla-Ad50D2SBH!;?I*C>VlmF5pP4B=Yb^L$AO zjH;6Z#4FwpI)wkgIqNudKC!I~6A%jr(qCT+f`=JOhpdKYjgKyF1o|g(`>x9P|9t%= zB~MON+^qa{*EGcF>*koZfMr`YG$}1Pbbqf8kqTsT4&GCOstP|_+`d}Un*Uqs+DkTe z0U?M4;jZ1$3I)I2jn4apPr7jN$Ddq(A$BI4_B2Afx5j6`L*%98;j$a${T=+V z>#S%~v?b}XxkUB)ej4%cIndUi5qi6gkVhzMtC}7}4ncgczl~Jy@zo4x9uWHVG0bLpWl~^I;~_gxd8(8RAznK9cl8{Z>>o@S zp{a!$dFZ7=Vp$c|%P5l&=#0zfym&UcJ?%ddRRNV?UWNLNMS5ZJ1hj#RNH@;Uq1ufh~m!NTWfzm|&&w}wVl zjb%-wSySCtqNJ18*Mj_NFm-pz%YL|=mqGabFk(i%)+#8_nlB(^Z(*{nM<`M=%0Fyw zu@~*M{(MKg(`Ur&`?T(oQD}L=(T1d3O;3yV!@he31KY{TIx9-UVSY0%oLu|C%z)`$ 
z+xa7o9u|9oKh4{Azx1R^)ONO9QR&#S_?}}i6u2$053XCgf~L$yJ5yzOUW5zoRPN{P z7g5t!Us4l7l%Ao8bg})!{d61_KbH+5l;`V{VbJEZ{n!F6Y=~mBS~duJ6K4%9bSD&Z zOU&LV$u~~7@w;n?RpHITyNas~`r<2h9_48%Ery1cqOfrc)kQi4 zRRL(P^dY~S{FO)@BlO7qsP-!=`NOeyhw&0BAI{8@PCA<7zF<$QFM8&{;c?I2T=U7^ol^nP1`vh4nkxgr9L5a6+|h=MYx%d@pCK*n1GRJM z?MJQ)A6hlkJ>TdX^mJ<`^C&`|rY6T2)69yyF}HjqgMS(Mh3%t6{n^Zg-NG z{*oTVxli0d0N8Sxt7J#0 zS|KMJOGlJ@5EbT(y;-_(d%rEb~%`VwWtb=n(M zaf>U>wM~s0I{6e12iFbKi;BKzG200>i7q|Ns~AQ#pFE=_dajf9JLgBw(Xo2-FG8NVOb_k!Nms=K#ik%`-CnW_L;a_tMc5P z^PECCH?}H7u;oqyl6k^Jv8Vdt2YK6@S#dMV$6)5ag?xR~tMT%QLPE?X)6ZlIizs8% zp^s}b)?vq%sS(ww+8llNvWaUArgA97mpGT1IS4JH`tF$!>g@Z+3uB%U_3Wo zn|APq7jba#W~#xd`5@}Guzfjx9%7W!wD${$r}v1+XSc*?N6^S!UQrA>2c<8%UcFUg z!~BR{Aww45nkZ=_2qwHJmoWNZ4KKoEb}eY&@ZY!0 zd~@~=oyeg@Pb`2L-)dYLGx%&R(z~vvh`NZ~SX;v2H!oYjr6{kKJ&OOjxbhJ6i z{P%^=#S?cVlc0E~-7QoI+IjjQ2ShyhN_4=TBJM?ErCuCtOZ`Yf9du5ePLoPfI(SUk zZ4z0dr_?YwlVHi7d5YeZ62v{K5|Zp6OLA)29h%BURCdqq`!fAoG5xsL{ww=;BBvWyS1vWQ_fH|dLJ48HH1 z1#{>sCPBB;f$Zd5OBDwb?SSC*n0?<=>CLAd`GGq1OuY$4AFtEFw07uD2NBBfjclM4 z{acem&mLkz@#DAGjvzKa*e`;y*$a2;XiGpa*fZyVBfUd-dWY`4yZ}sp-EZ!|U33{a z-h$7D-B*X9>y$IQn+6KB)&70xbMbKC#0D=CyhSGV!h~5n;nYaE>K^D>yccwZMY48k zu2pyfL_W7wsT>5yO{eIf^VYNxv%TTzFn|(M8V%W1&i4IpsH}^`gL);c4VB1x7#g_5 z0fuX8u*u+FhJD29zq1E&ReAz=0iv#6X?$q{6fY zY1m8~vDhs*t_9=*UfcKw4&^_?i6DVWd2a|ln?bRyY^~6i{@?d^f+>@}ov2;fG@?&h zB;??u6JvZ{WT$kE8-KTMY7SIyogoofA!n3J5u568pyq2nv9G&AJOFlw-vEw7<;;42 z0{Yay!Dhal_g;^j)dE_o+GyhFY{1ww10Tb9mO>|&G6RKy^Pe}h_@>w0O-TGWDKm2R z-&2RW*4Hz(N&(ii=|8rn?YlIY!i|TTs4DF*^*9TYp-jMSY^}@vk+q7+H`-B%T4(a7)Z$q~HujshWbu2r?3o14Uu?9nK)VRSuX{L(b_ z6dFwYf2}LJscX*zu_NM{LYd41MB|0nnOvWM1TC_R->OhxPFu=t=$=zV;h<+fFj*p z%LvjwRo3XPMAj`t35yaJ>-wte(z}~@qTF#L5}x=YHWL9V6?f54u*+?)IvY_4c`a;gx46sF;8g&*DjM zr8-(qko)u>x9~>!{<*q++~ySV>c5c$J*4IwSUYK3;!X$(3u+77z!-93^fSb~E3GH$ zD$;?;%(b5ME0V{oU9felx>KAa&qk5MLv}lpv=N(uwQemP04M5^zk_{AHY}^hN6yAk zTFb}j>qP?&wd#Kq8!!0=ivMLPJo`HZ+gX5%B>`$`<3Z_o2B1my)Re$J>EH8*)VYI9 z>g?)h0%o}T#J!e*)&s{_QclSFoItp`NWCoqyi5xr{RRV1vU@a84sx6t$rC*IEx^*( z&tv{`9FMtiB;c!B@x;CL4^=cW$re(JpJbXUYoJ_!Anw4j&0vFI;(<^VvR;n3H(a_l z@&}JhBd*TrWr5v zCsgs19U;e^(l~QWwL|x!MSo0ht#XJ1N47X9V7fRGh}f#F^zZ!uZtG2G@6F7sEk@4% zxjcp1&kOWR%z-+7O|))7-lbtB1}NOXF_$H4Cm2wN2PjgrjG)+mkw>CDu##yad;2Yj zwfDsCj%OnKJMS6&I8p;r`G=-R2i7ERV>>`zbPcJ4o&!V#%LbI^|GG?#-|KBFfg-Tq z0M!Q9IK8VT&^QxtF3>;Dr3(N@o|QZ0(VSJne(_`wWpd78b`5%89jG<{6V3i#NK1EO ztfbLnk$4f>=n7*mjlT3dr@?xnjN&jC5`4Jv{)Yhc^#)$Y?+`zaqPKwYM%Pe)%XM== zu-Q(WO~wBK1@Ukjk2G~`>Dqrjj85XM1nrWhVY8WfgMfMfXR)V9Lrz{f$dwP+bRG0W z>67)QR=fxpw%IU7`cJj)b$Dr!F`Ay#xg#O~)vFDl!~jeI9YZMJ5A(bT^^~t&O>G5h z!>JN~;*~#u+u$)W0NDitca&cJ7jHfnPXhR9!HgMO`hGxUJpw>)x&hAP#FJRk-*65< z5++(#BzCU4JIEx>$pc8K0wo6IjchC!eRh1k313|??yM;da8muxWdD9C&mUmmjRHQZ zyD5=p`A-D$K_1iTtHp)aFizDpdS{XgvsYF4XZreRFoVphG&z>T`#&|ET_~~axutcm zVXwVUL*ZA5}4_|Id{`cf;Z1xx~o>5%w!(}1H$iQ_g!MpNw#zk z2;VIn2^J9ef1rfxVIakHoOFQFgztakIOtCEM6Lm_u-Pu1P5VvF)}YP&-$)z4Cf&62 z#FS2K7ElmEc25t?2DF@bfX&fllm#BFO}Clge7pV!c#cv`Q#c^XLHu5EW!OhH{Kd*+ zb7$UG7WR-{YtNr~0bB}9N>dCa@G7+)jPQ(y(hr1YuOfo*6L_8Qn zkQM?TxT{v+fZU$mBw~H#QqmLl=7_o^BAnivVjR5Qb89PsaB1&dO2_!(RSs_m5`UMk zQd{yr3oGa`$VF_m_jvU(IZl+pm&w0il0(`XKngxj!kcz_no8$A0BP?~jkgl<1tM}! 
zG*v6c=;$`e0cyx05Ma``{9l9_Y{k*s^;vIHmBkUamIQ+BaGY7C|2Yd!PRSt@AH(^j zsyQUVGt(q~AoeCr7PfrszxV*WDoCCVS>#u1fT` zOZk02h3qR$UyBfAk`>HamR|LEZ-IsupoQO^*W7U;+b!Hn=h$Mv=>N&@5ZkwS_*TYr zt2d=RE2Gvf6S-nM&ZHosz5k_CWVhc7jvvjG_)VJ6B-?8lr@Ge#NYbRs>i=^C5)!FM z-sY~Fh-T2LaSO*;dt`L(HqWY7TD<7t(r{E?r^zU9fVy%e(B8fwf0FB{U z;U484yw|TK*p}cX$p#7@ARVU6kZPul?CYN z{zcPzIV-3J*|`v59F(xGuStv{VW3x6_7*btiGmS3+RfJ(*iOxcuiFHaAI-gW{boyO zPEz9Ho_*RK;Z6JfDH#|ID{(=DoW5nqXfVVj$y~~+=_}8oNF)hg-OQW1ZgyZ>0OSTl zwuPLg7aH~%c52>WIJFON*L&~6V%8IA$+QR+fwUovOA(%fpRGCgYWl-Rn%F&@#qk>c zi5em~MXNZm?=CYM$Q$E2pzcG7x)UHa z3l1uwkAjxOVIQr+2>PouH`aaILx-Nbziw=glF<&puNSCI)=|HmVsx(CCT7WhHAd!D zZC(>lgVU<$47nc9B+`Cwwb_VYOTu%y<^v0TKV!b{$Kx5zvp>CQ`{)OcpLR7SkoL>w zMUqGs@0Z879q`Qt_xVw07eP(9+v}1^Z$x;Wm|R?Xt1DN-snw%BI6xf~SkDGAoW#Mw zzkT&J=)cmnPVW^TSWN{$Jc~n2`F=LB`3Ze!HM{ku{dKSAX=n)}z}}adWJN%5uh1x1 z!?VkV06k6VWPzhhm6j0Nzfu@8hG`mDC>pmIHXT742|eBR-2!Q68+kjbZWH~dg@n^I zAmbZrn+%h6xg`r`HOYrX`zt9G4pRQbi~*@9M^0*}Exs90qlhM_If@MRmhN<~i(8M% z4NM!x?#vM^`HSU!nwGbdeIdK_mA1ZZ|tRkA${otOwvA}Vexr2u)F-cPb*eb^GcGYI{C2z?6 z1wl>gz6G3^ap_|@y@#UVZV8SvL5t+`Rek#_fVrRe}@YFdQ`%7dY_l}?Nhau$GtWU(gs!NmlDSBG0Gfb zIMLc`R2m~*4hfI0%Te_B@nB=2HKxd)3x1L`M%m`J>Vw>?{8`g&m^{=Jhj>lR3=^gR;1+uV|E2ruJS^M zd6K}>$u6ggs93f>v#OzRwIuEUYy-yx?x!3Ij_yYJRQ*w85 zz4}RSGkfM~(L?9`Xwa!kQJ|{uUo1#zg>Wg1Li+qE-Ze#cDKlHR$Ds&xVx;gcOYZa(!9#d!rb& zIw-4C)uycEmNH#Q(R}i1YFJ&wM*D-tC>`5hMa!}2sEjkqJ2y0rw(wRNW+1Gd_wwZHh#D26jaZ#=J(5od9&k@)uQotqeES2*gd_R4AqUY+8moEt% ze0RN7l02E0+3DCzp=)NGH&`0h(PI-UNhVG*m`PuUTuyR1j*MUGu%u2>Z+RMMjYRmd8DN5*Xbtnz(aWO`m_@G zjoM3#n0+FJY#`igOIu*4kTN@bL&UPkvkfnsTzozM_MtE!ID2NU5$0KyYOrzTI6(KrE&KgEhj?zxHx09T#!H&A z7-mr+dC>Z3^VY+8CUAl_EBW@7`}e*(TPKup2CK>bV1Oq zqNTXrcuD=5(Lv?c@tiW)^GSo!y=R^q(FUvelD2M5-`V=f#lPtYlikj6(UDc?!seh$ zkLzPCHPR0i=hZ8^XZ?JAivFPMdfTXOJV-|QYMRk6%Ji?J7T1NwArV3R268U8EN9%2 zBY1Whb#{|&2X?tRn5QTI(f7AB&=)I;<;!_ols1L@fjSmynM~D->d*!TayBsf*j$_% zVo|U91bHYLbZJD<|CT`cbxFNqj&@H9_o7z`(dBu8{CWN({`iidw-lGzJD);SsPKr{swNa5HQEo4IhsVA5NCSh@SL@rKeO&(BqMBiJu3O`L7K35v|RF5x2_3Dy{K%n8hx~ocDs9&D+{A0m-`> zhsWw>6>H9_TERbm8ba`P!e2r^@qUmmNHi|jvc4Tebrcy^OVu)2v_IMt#$3*>pIWdr zA~uyx=6uQnlm6IXNYc4Ys=tm%I~L3@KS&i0I!8T)s$}%d9dL#RQYBB3ABI(a9xnz6 ze=YQoeNDq*#pBVb?ZQ=t7ChZQ;5P``XFjMleZ76l#gLq5Hbg90Ur{5dmI<0YeR_(CqhbCG})1L7%+xS{py|Mm?t)8aFa#c=$s_yD8Xf-#pwS_0b zjg*wic$<5bE3cVGxuSc)`9ywkMoFT^<>@(b1L{Cxx2-}1h;!?^T=zQg4nj6yARzJ z{I$9ws-VRbSq{gVA-H$ttb&3V5`YR$M!|tK^<1SBlPyEVp#SOl9|&RZN`0{T9>GDC z#m=K3cnQ@`H|by!v%343cm>x@LUBw##a>l0EM`YY=Vc%(A3X{k`PIN97ZZ z!_YhCvPyAw$;pL2^1`F=_)!Urdt;#MmsQZHjL*jiH?CJ#DyLP?!(wo$R#iS9C?XQabGx-{%P;!<5Y-;wwIJ|Ft@vi+=X+AKD~pWnuX zkgsb#{!KQ2;gE~}S&sd2{mBBwDA!WORjYwMW|#NQ<*gTRT}R@VWMGaHGc*gKpNO#- zl-zZf-`K}sd2fiQe7dWh%WZMV(jr1GL_3yOVEB@-(y0K`t(G6{Q-QQQXALu>zhNeS zxBLW+i-kH|k-Uk5TzBZB8Kw?HzX-UY;yGPB%@*G2G_b%~dtDMLPA^+RrixkD<*X4O zKgN@%og$WbLl`n*V2*%O7E1mzwG4XcJ8{fN(FoO*ER9RQjV~_@sKF>65Big8w)tig zj43)6tg8I<#*Olb+1ZVS)xC<@Tq;>P>VE4s6L_E~D5ifJrL4M${>X4yw#^PAF`0HbnOnDhbxpH1qPRce6!{V$~1-tr1HTc3qT;Qpy^<}`Uo#ND6P{S%y z6JTT!j${mhOJN)64VV5Cwd#y+Ra9VR4qKciLC__VC$#?Dy`Q-0b3EgWfJcO5!JCI9 z!yw>`-JK4dYT~VqFAQ+q56-NPM>i$G&Sl%$q!R4N(FJ(?t@~q2+uD-KsVtI{ z6Ob`k<%mbX4Np|T4_sS#t;5dI z9Uv4rZSbthi*^ZU>+GfulIkCbS)XC&0 zxhl~b$Hh)B315$HJ*m-dskc$EX6tQ%_ANb?_U&99V$qtbVLz*^Hq3MgsE%hBChK$M zSNAV^4N40~$cAgmarU`PB`jC{v|_7@R{kEYdUE6{lOrAoC_BgeITUE^$hv_=l43R) zJkAbWZw^%SeBKr&bC1i3n=so3anAcH)S6#=8glJx81euj^zq&o_a@{$H*=NVaK7|5du8`D-;xsEo`HPvjP#ZTdT)@+ zN$#9oW{>sJ;9n)}YelsJr?yH(iY-)=y%DjhX=5)l&b|CW8DrqS(Ge@|h@@eL6`mTS!19v6=U z+GfdyMof+#*Wj*M>#y>aY$P##;KGwrQW({5l$APsI}rB`y2zep^{dPHRwaYd(BWsI 
zR*$N&(S8iW3s3lg{<>x0x{X}k0?`5KSm0U&4T+tR;7Q-SDnO_tfHDo#(WpdvqpBmS zfAhs{=G)untmi|MK4)y_nw=m6C{@J!v@8mukz*ZE#6EW`T$jUY{}P8B*gWu(4cCnu6lG$+jzn-|Tz3!0;cb=%I}MJ27+^gV3T z3ADnHK?nD98~U${FgF?x>;AY-q{c{L-c0{Rq_=&eP>iHjjZV&1E*7&bkr}Uh8_d^D zkebggA9wG3`q1o;1_P5dx*Rs#__BKSL>2Sf&OAIHr8}G(3qjc)n~?%9FKQTH zJriCPgzDIURO@(!>#C(T5xH-c#=HLrzp3)vwHHtSRzNejVIX9i-7Ax{Us!Jiim#lv zKkqQ1qZeblUy}`H|^_8 z^+koHpRK6jT(mse%*-66+vX#JbT2y^+^RLjGjtwF3=hM(h|z_4~?3lK+75^ z{xIR`3u0x#gPkeAl{;6#S9w^sA0AOlaE47cSmaWK{lLUteO%Y=(CSk7_Ny!2_N~iL zwDx~6ehbaD@pJ5-$CRZ7Ix;yvkU8PE7)uIy34LYcaVc{8y1ls&O0##{jS0SySQ6^W zrK&38pJD*c<~gf+uImVQX#p$^?$OtsV#|z|H#T;vzn0hNw2`Znb}N^lb>%p@SU~Q~ zvOdE-tTM!WYAQ5LYsySr0tYv>_fwrXJot#~k0O@Mj&I~@cEJ;M(l+lVa92H56c;}+ zBKyd%g4S_oLKZ{W%B5hn)ajgCv#Sb)BotucIFBf$^x6L}q z{73)zfzORsK+FAK_;{BuD*FkU7WOHhaK10LrT6=+QuEP!jvJ4U3Dyv+4d#*sec-PbsrP6p6J5G=G zyq5HiPem-CHmC6bp--!N3k?qiKJ(p(*{Vwx!Ix+>*~Y95JCJ#dE=G_D)L!XDEIwbb zVdm~77%O1o@vlM4+2LBp$^8+H(_TbhMwYer`LN2vpB*^*bl#6At>=V;IdNNJL-PoE zpx+DFsK}>PkaGLX-`(kk6FDin*$K=_m#vRBfV`h*1)r7V25}$F)^Dg_LQ--kGBnL{ z3F+e@&x1u z=xDELF>i4ua2vjv4?Fa6q*y3eYyT5>s7L(2Xr-%eR(D#nYwB`=s6hibQQ_gQ>7-}B zly3n0G&Cu<4D|JAGx%J8{21Fu+pYt9`sKUpN`@&N#me|ouQ z?l#dsnXfOGjI}_FxGqpl`Pu9Pg#~qTGlr&X4{!N_E6frIPdozM%=!K#ls=uQ9|+gd zU_tWp!A|;Fw7Sz?SY8?J7>l_Y)<0kWSBiutLsHDAxb_qLt~)PM-OqJ|2z_=0$8P>< zP&+%<7RnKPTqSl+nK`F~+38NKW)|gQN%-a7BPa9#C$6)XhYyTA-24^9S1|m9Rg|OI zV2ZDGwwe76ipA|gvyMQ}D|3s9&_d``S?+YIxlq^Mttq4X#+j=jbun3bRBbL7&Sa%( z^O1^0_1AjK*_hNrC;Xf!Wp!F_>i7gdw&W28e*fu0R5IpX&MDg~zH&RFgk{f03{x=O zTy||L6(sZ1!dM6b%>P<0f%?I0*h*z8+zqu%?cDslc%SX*lHnZL>2KQN!!s}xV@i@x z6PIkOBD`+5XwxWx!m3bj%EL?W2U~7iHJGE*g zo7e5VWXD^(}NXBlJ?BvWUr1P}^J^+Z`YJEJpvr7d&fXX=C z0JjB#wi}!>KWpZrGD>uaym2R$r#-*fj`_xx&I{)A61&3zL|J&)>WcO2(4yKs^l-?K zs;!q_f0@R{a1L5(><<(gtO#TW{Ikl!G@JTEi)XP;hN<- zycXDT9b0!3w+!R%D}3uY4IGR`s{Ee<*%@;O0bz3ECgEU58nZ3XAJ4}`9N|Ymv?}_z>a2aV+5>zJbvr$W7g42ZkXw&A9pR-FNBu!G5 zey|<^fwN}xgsWeyd^0UAo-_K6g;YGqK62@|ZrXxSW@=!FVrxvwXOVF;o?BEmZNmB0 z$2{CWUo`LNdk-{U8$nlJra=s1YY!t2?6>j>4dZVuXnRjzNMXR{VJtNB+U9BIkAm4k z+XhE;3c@X;*Y(AIXja<7qX}!u80cBu)|+p|FhvFStNVf$O3@dNd_Y?{!Cem?37paW zn`22Q42F9$S8Fd?RNL-YL8pWp6@a~~Fd6V41l@)}AG?w9*=W_v&FAP`?B|Xg?w!y$ zb}w9*tnQlQ#W$vbKlibepd{vZ=dyDE=e4TQ`Ahr2LETGLS| zxM*HdR%-o%i|h@?RQPRXed{FZg|j`;S=`r{kJ)rzJlcE#^#Nuv89#zZZIwq^_l^xJ z{MFpW>c5fcc3Gp^5oggb#kqsN?K{6se)qmgeO7%V?Z&4xxaQG)QP8p&+jXGTIip|y zg81hk?u<0mgw?;^(bMd~1?uVr&)G@C2*tdc3o7n=KR+_5x86OX-eV$D+v@EmV*8!o zZyxa__I_WT;8}}=jScou_uKHQe6=lekK$XW-|-Qn$$X08&YtCB2 zq-*bVJke(M4AC93p$Gp~3g~Q4lQ@R={85(`;`JpnY&(Blp5%h_L0Ydwr8aZp$k*OO z7ES0>-CIb;%$99pPS3?-&3qqC>U1p^3;OQBUoj(yIH7Tszq&|>4Khh z6hN2b^2N%_B&*C`%NH-ksJZzFynMf$I1%po$nf#0J$Y=Adu852wv*kl<}KXUMec-O zG;XY_CdFcVP|iCIbD;m$VrE&ia+kyQ*ouY} z-l4-o+mcY<=V#X)e0VVQRxsQLc+^oY@kCyU4dOMh=Z!P`QZx@+(o->VkqTd{GUio-20 z=!tOzTri{jdi&N?&XS{bMpyfcC}!$f@9Nxks|I?9d)>YBi!1pjcY9kA+Yvc|$L=P) z%W#yX*uc;|gJIigv(@u_{~;D|d9Y%6t7)pEn?D+N+rvZoK91jLs(SLOid`>YP{Kdgt4ga73E6+OWk(xOJ~SgE z!4aUvx$U8EX*756Rt970fM9!Wh$^8v3y*EnYId%kyP37{Pq*(q&0+zw?JoA~qPyyX zaPBdD(W~2fstM`+C&`jfjA6`0*qgXdq!4Pz)Cj2>fM=EGEL@HQ+w|MF?jH~i$gT2U z3JHT(**kot%sv7&0=qXv=Qd&7dbqn4>-K_Qw|{tfus$(2-L5_N-CIExo9<*$&rWzr z%fa4x>wI&h^ENY&nTG9&44z;df|8rs+M7pCR^xu<|AwyS^;7`)cW)VfQ6axTqyorR6*)|rG7I?hwU52cL5jX-bT5{7 zj7`4NLH~$)b;bGF23b>3qxbUB-PAz!1Z&pZ`h5700zbQ!1f#{UjxVZCb-#%)og)E` zUH(@%{Q7udpMUqu?iB_Xx{sS*eH8cY{T+@DJme#fd{e)*MRI*n{DJHPNrTaakkK)e z%MK4f8N{K#;6cil6DeYTSXR`+PL5QAhqMKq#tTpSPGzP0!pomdzc*xr6YTjEYi@^C zhF;$@Un(g30mftybBzB2bLS_2VL$%ECYj;cvD5Xug>1$_?sbq{CtG3i%@9;-!_l~e z6^WsS8@Utl>+PwcHY0th^Lchuf4K9rG2<{{8)OXryjy4YM2u!m 
z`+om@-1Pz$@<{$+it7o)*VKjs0AjWea;(vHHOSrj=9>WoccIzrm`%w3}o%) z?{V>?bu{^Ih%Iw%g7qo#*u4?2RKk>x=&)sk!8ZF__g*ICwPSXdf6Mz5@qj}$6RVx# z;<|8oH_9%u4M@CP2={^bVX}ugUHxCV`eL68C9wAX0ce4KGf zIHZT*m%N&VE7U<-$}+d7gK2JRtNOznvQ;C|lOy}DhLUdXbDomv4)oU^!uy-uz8uAp zs9LI)3P)Cc#vp+NSix-C~nq~Q45 zSgxx-j=X(r%BYb9h$JqsE1gO*jW2y=VaMXx@$=nl7T>3-``a2=5O8^M2#NcRxt@c< zi-$9wI^_zzGydB;eZ%Kd*p@CH)5&>XyGf@Kq+=8 zB*2DWepahreZL=hq`oX%Sq@X+*&^tl*xiW&a+z;Riteb7Kaq54c8asBIP$rhb-kja zc}o5o_0@P1nP})V*RE^0u}t|0!V}8%!QTB)t{*((B6x-BP?<2rd<8SJwA+w}$(3z8 zm;PGLkpQvj2@2lq!5<^uM)6K%GslxZ<9Dtd+rJS5i+C`)r4CetFxATE7ZQ{p z1+wotVls}u>yK`9)%#Yf8@X`~&-on|$BGdHY1?-9(yi z8GiX@qAvf!X0`7ci%|MAnL#$$Xj^xr05iAE3_xiU+ADV|WF$bfgs73zy#A%Rdn7?e zWb3P#>qaj2a0FfLis?$(qov$wCla(kbp5@IeU`gGaV)6}w|Hgh!~e7Q#zmKYPxp$C zVW0ZN+HmVKzFB#WKc_f$UTxWtzUZA*SNe>mDYw4z=4=Kou`38*>)+aQ@Pkg4U-r!R zI?YvQA1###N=%rurfv7L)VghR=DYkZ5-a6$KGs(FanY$06Ux_~^X_@k==}e@?R$-(y9d&;)PKbsfb{o%%)oo}b>33>ipRciYrP@j2^ zw?K2Z(Rx1D{h1TvSk|y@`?cZg&)SZ|4^J<6X3(&A$-fFo;C4rY^nb=%bGo+2uh-XH zr1t9Rmjx?71^q3Y8MS%If8dOeMUlzFU?-jTJjtiy7`qRea?OyKsn7Ou?bXBD$zI%- zYLad)D*O;PcS5@V|Kd#?v$HC6U)GB)<(buTu&!Ena^i**j-#@sPT~tC))?Nud%dnY zIw!Z2nens&&-sq32@kIPJi6!6?bns{T9*v+zr2&k_)v36iOc(Lne8SE{X@?$RPM~1 z_1*NAm+s-3-km(=A^paIb-@ddZH-&4bLCo?iGW12QNIM{R*S9&t=)Q(=)!lkQFngv$@l)E7vEMsU&(**b?S$d zjCi>}wV`%foAhc` znX0>PtN&eoer36;)WyaBjcfmU|D5ld`F!eW=ggB&4ZqiaxiyWmX=T;L>*1=BONES& zGF;uox!vsH^6#I%@;=>td0yW8&zdZP<#3-xNPXX|xOo%1{-?(F~n7fYhSMR?Aj?OgU*%b#pNa5 z1P%!uU|ezR@;=^nSNnXizxNi+x955NYrgBvYoBMi8;h;q_<3JkHS^tZ}sg{i@vXf=>blFHs~f@l>WJBzIk`u^9BEz84C7p%9y|7#q-2Mv6GJv z-?=XK^Ijrkk?A^N11`n~1{d0aBZH%WZXu8}<-dHXdfNg)0p;r;OFdovT-G@yGywpR CNw>lP literal 0 HcmV?d00001 diff --git a/main.py b/main.py new file mode 100644 index 0000000..11c9411 --- /dev/null +++ b/main.py @@ -0,0 +1,50 @@ +#!/usr/bin/env python3 +""" +LinkedIn Post Creation System +Multi-Agent AI Workflow + +Main entry point for the application. +""" +import asyncio +from loguru import logger + +from src.tui.app import run_app + + +def setup_logging(): + """Configure logging - only to file, not console.""" + # Remove default handler that logs to console + logger.remove() + + # Add file handler only + logger.add( + "logs/workflow_{time:YYYY-MM-DD}.log", + rotation="1 day", + retention="7 days", + level="INFO", + format="{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {name}:{function}:{line} - {message}" + ) + logger.info("Logging configured (file only)") + + +def main(): + """Main entry point.""" + # Setup logging + setup_logging() + + logger.info("Starting LinkedIn Workflow System") + + # Run TUI application + try: + run_app() + except KeyboardInterrupt: + logger.info("Application interrupted by user") + except Exception as e: + logger.exception(f"Application error: {e}") + raise + finally: + logger.info("Application shutdown") + + +if __name__ == "__main__": + main() diff --git a/maintenance_cleanup_reposts.py b/maintenance_cleanup_reposts.py new file mode 100644 index 0000000..7dc8f5d --- /dev/null +++ b/maintenance_cleanup_reposts.py @@ -0,0 +1,146 @@ +#!/usr/bin/env python3 +""" +Maintenance script to remove repost/non-regular posts from the database. + +This removes LinkedIn posts that are reposts, shares, or any other non-original content. +Only posts with post_type "regular" in their raw_data should remain. 
+ +Usage: + python maintenance_cleanup_reposts.py # Dry run (preview what will be deleted) + python maintenance_cleanup_reposts.py --apply # Actually delete the posts +""" + +import asyncio +import sys +from uuid import UUID + +from loguru import logger + +from src.database import db + + +async def cleanup_reposts(apply: bool = False): + """ + Find and remove all non-regular posts from the database. + + Args: + apply: If True, delete posts. If False, just preview. + """ + logger.info("Loading all customers...") + customers = await db.list_customers() + + total_posts = 0 + regular_posts = 0 + posts_to_delete = [] + + for customer in customers: + posts = await db.get_linkedin_posts(customer.id) + + for post in posts: + total_posts += 1 + + # Check post_type in raw_data + post_type = None + if post.raw_data and isinstance(post.raw_data, dict): + post_type = post.raw_data.get("post_type", "").lower() + + if post_type == "regular": + regular_posts += 1 + else: + posts_to_delete.append({ + 'id': post.id, + 'customer': customer.name, + 'post_type': post_type or 'unknown', + 'text_preview': (post.post_text[:80] + '...') if post.post_text and len(post.post_text) > 80 else post.post_text, + 'url': post.post_url + }) + + # Print summary + print(f"\n{'='*70}") + print(f"SCAN RESULTS") + print(f"{'='*70}") + print(f"Total posts scanned: {total_posts}") + print(f"Regular posts (keep): {regular_posts}") + print(f"Non-regular (delete): {len(posts_to_delete)}") + + if not posts_to_delete: + print("\nNo posts to delete! Database is clean.") + return + + # Show posts to delete + print(f"\n{'='*70}") + print(f"POSTS TO DELETE") + print(f"{'='*70}") + + # Group by post_type for cleaner output + by_type = {} + for post in posts_to_delete: + pt = post['post_type'] + if pt not in by_type: + by_type[pt] = [] + by_type[pt].append(post) + + for post_type, posts in by_type.items(): + print(f"\n[{post_type.upper()}] - {len(posts)} posts") + print("-" * 50) + for post in posts[:5]: # Show max 5 per type + print(f" Customer: {post['customer']}") + print(f" Preview: {post['text_preview']}") + print(f" ID: {post['id']}") + print() + if len(posts) > 5: + print(f" ... and {len(posts) - 5} more {post_type} posts\n") + + if apply: + print(f"\n{'='*70}") + print(f"DELETING {len(posts_to_delete)} POSTS...") + print(f"{'='*70}") + + deleted = 0 + errors = 0 + + for post_data in posts_to_delete: + try: + await asyncio.to_thread( + lambda pid=post_data['id']: + db.client.table("linkedin_posts").delete().eq("id", str(pid)).execute() + ) + deleted += 1 + if deleted % 10 == 0: + print(f" Deleted {deleted}/{len(posts_to_delete)}...") + except Exception as e: + logger.error(f"Failed to delete post {post_data['id']}: {e}") + errors += 1 + + print(f"\nDone! Deleted {deleted} posts. Errors: {errors}") + else: + print(f"\n{'='*70}") + print(f"DRY RUN - No changes made.") + print(f"Run with --apply to delete these {len(posts_to_delete)} posts.") + print(f"{'='*70}") + + +async def main(): + apply = '--apply' in sys.argv + + if apply: + print("="*70) + print("MODE: DELETE POSTS") + print("="*70) + print(f"\nThis will permanently delete non-regular posts from the database.") + print("This action cannot be undone!\n") + response = input("Are you sure? 
Type 'DELETE' to confirm: ") + if response != 'DELETE': + print("Aborted.") + return + else: + print("="*70) + print("MODE: DRY RUN (preview only)") + print("="*70) + print("Add --apply flag to actually delete posts.\n") + + await cleanup_reposts(apply=apply) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/maintenance_extract_topics.py b/maintenance_extract_topics.py new file mode 100644 index 0000000..aaef6f2 --- /dev/null +++ b/maintenance_extract_topics.py @@ -0,0 +1,93 @@ +#!/usr/bin/env python3 +""" +Maintenance script to extract and save topics for existing customers. + +This script: +1. Loads all customers +2. For each customer, extracts topics from existing posts +3. Saves extracted topics to the topics table +4. Also saves any topics from research results to the topics table +""" +import asyncio +from loguru import logger + +from src.database import db +from src.agents import TopicExtractorAgent + + +async def extract_and_save_topics_for_customer(customer_id): + """Extract and save topics for a single customer.""" + logger.info(f"Processing customer: {customer_id}") + + # Get customer + customer = await db.get_customer(customer_id) + if not customer: + logger.error(f"Customer {customer_id} not found") + return + + logger.info(f"Customer: {customer.name}") + + # Get LinkedIn posts + posts = await db.get_linkedin_posts(customer_id) + logger.info(f"Found {len(posts)} posts") + + if not posts: + logger.warning("No posts found, skipping topic extraction") + else: + # Extract topics from posts + logger.info("Extracting topics from posts...") + topic_extractor = TopicExtractorAgent() + + try: + topics = await topic_extractor.process( + posts=posts, + customer_id=customer_id + ) + + if topics: + # Save topics + saved_topics = await db.save_topics(topics) + logger.info(f"✓ Saved {len(saved_topics)} extracted topics") + else: + logger.warning("No topics extracted") + + except Exception as e: + logger.error(f"Failed to extract topics: {e}", exc_info=True) + + logger.info(f"Finished processing customer: {customer.name}\n") + + +async def main(): + """Main function.""" + logger.info("=== TOPIC EXTRACTION MAINTENANCE SCRIPT ===\n") + + # List all customers + customers = await db.list_customers() + + if not customers: + logger.warning("No customers found") + return + + logger.info(f"Found {len(customers)} customers\n") + + # Process each customer + for customer in customers: + try: + await extract_and_save_topics_for_customer(customer.id) + except Exception as e: + logger.error(f"Error processing customer {customer.id}: {e}", exc_info=True) + + logger.info("\n=== MAINTENANCE COMPLETE ===") + + +if __name__ == "__main__": + # Setup logging + logger.add( + "logs/maintenance_{time:YYYY-MM-DD}.log", + rotation="1 day", + retention="7 days", + level="INFO" + ) + + # Run + asyncio.run(main()) diff --git a/maintenance_fix_markdown_bold.py b/maintenance_fix_markdown_bold.py new file mode 100644 index 0000000..6818945 --- /dev/null +++ b/maintenance_fix_markdown_bold.py @@ -0,0 +1,181 @@ +#!/usr/bin/env python3 +""" +Maintenance script to convert Markdown bold (**text**) to Unicode bold. + +This fixes posts that contain Markdown formatting which doesn't render on LinkedIn. +Unicode bold characters are used instead, which display correctly on LinkedIn. 
+ +Usage: + python maintenance_fix_markdown_bold.py # Dry run (preview changes) + python maintenance_fix_markdown_bold.py --apply # Apply changes to database +""" + +import asyncio +import re +import sys +from uuid import UUID + +from loguru import logger + +from src.database import db + + +# Unicode Bold character mappings (Mathematical Sans-Serif Bold) +BOLD_MAP = { + # Uppercase A-Z + 'A': '𝗔', 'B': '𝗕', 'C': '𝗖', 'D': '𝗗', 'E': '𝗘', 'F': '𝗙', 'G': '𝗚', + 'H': '𝗛', 'I': '𝗜', 'J': '𝗝', 'K': '𝗞', 'L': '𝗟', 'M': '𝗠', 'N': '𝗡', + 'O': '𝗢', 'P': '𝗣', 'Q': '𝗤', 'R': '𝗥', 'S': '𝗦', 'T': '𝗧', 'U': '𝗨', + 'V': '𝗩', 'W': '𝗪', 'X': '𝗫', 'Y': '𝗬', 'Z': '𝗭', + # Lowercase a-z + 'a': '𝗮', 'b': '𝗯', 'c': '𝗰', 'd': '𝗱', 'e': '𝗲', 'f': '𝗳', 'g': '𝗴', + 'h': '𝗵', 'i': '𝗶', 'j': '𝗷', 'k': '𝗸', 'l': '𝗹', 'm': '𝗺', 'n': '𝗻', + 'o': '𝗼', 'p': '𝗽', 'q': '𝗾', 'r': '𝗿', 's': '𝘀', 't': '𝘁', 'u': '𝘂', + 'v': '𝘃', 'w': '𝘄', 'x': '𝘅', 'y': '𝘆', 'z': '𝘇', + # Numbers 0-9 + '0': '𝟬', '1': '𝟭', '2': '𝟮', '3': '𝟯', '4': '𝟰', + '5': '𝟱', '6': '𝟲', '7': '𝟳', '8': '𝟴', '9': '𝟵', + # German umlauts + 'Ä': '𝗔̈', 'Ö': '𝗢̈', 'Ü': '𝗨̈', + 'ä': '𝗮̈', 'ö': '𝗼̈', 'ü': '𝘂̈', + 'ß': 'ß', # No bold variant, keep as is +} + + +def to_unicode_bold(text: str) -> str: + """Convert plain text to Unicode bold characters.""" + result = [] + for char in text: + result.append(BOLD_MAP.get(char, char)) + return ''.join(result) + + +def convert_markdown_bold(content: str) -> str: + """ + Convert Markdown bold (**text**) to Unicode bold. + + Also handles: + - __text__ (alternative markdown bold) + - Nested or multiple occurrences + """ + # Pattern for **text** (non-greedy, handles multiple) + pattern_asterisk = r'\*\*(.+?)\*\*' + # Pattern for __text__ + pattern_underscore = r'__(.+?)__' + + def replace_with_bold(match): + inner_text = match.group(1) + return to_unicode_bold(inner_text) + + # Apply conversions + result = re.sub(pattern_asterisk, replace_with_bold, content) + result = re.sub(pattern_underscore, replace_with_bold, result) + + return result + + +def has_markdown_bold(content: str) -> bool: + """Check if content contains Markdown bold syntax.""" + return bool(re.search(r'\*\*.+?\*\*|__.+?__', content)) + + +async def fix_all_posts(apply: bool = False): + """ + Find and fix all posts with Markdown bold formatting. + + Args: + apply: If True, apply changes to database. If False, just preview. 
+ """ + logger.info("Loading all customers...") + customers = await db.list_customers() + + total_posts = 0 + posts_with_markdown = 0 + fixed_posts = [] + + for customer in customers: + posts = await db.get_generated_posts(customer.id) + + for post in posts: + total_posts += 1 + + if not post.post_content: + continue + + if has_markdown_bold(post.post_content): + posts_with_markdown += 1 + original = post.post_content + converted = convert_markdown_bold(original) + + fixed_posts.append({ + 'id': post.id, + 'customer': customer.name, + 'topic': post.topic_title, + 'original': original, + 'converted': converted, + }) + + # Show preview + print(f"\n{'='*60}") + print(f"Post: {post.topic_title}") + print(f"Customer: {customer.name}") + print(f"ID: {post.id}") + print(f"{'-'*60}") + + # Find and highlight the changes + bold_matches = re.findall(r'\*\*(.+?)\*\*|__(.+?)__', original) + for match in bold_matches: + text = match[0] or match[1] + print(f" **{text}** → {to_unicode_bold(text)}") + + print(f"\n{'='*60}") + print(f"SUMMARY") + print(f"{'='*60}") + print(f"Total posts scanned: {total_posts}") + print(f"Posts with Markdown bold: {posts_with_markdown}") + + if not fixed_posts: + print("\nNo posts need fixing!") + return + + if apply: + print(f"\nApplying changes to {len(fixed_posts)} posts...") + + for post_data in fixed_posts: + try: + # Update the post in database + await asyncio.to_thread( + lambda pid=post_data['id'], content=post_data['converted']: + db.client.table("generated_posts").update({ + "post_content": content + }).eq("id", str(pid)).execute() + ) + logger.info(f"Fixed post: {post_data['topic']}") + except Exception as e: + logger.error(f"Failed to update post {post_data['id']}: {e}") + + print(f"\nDone! Fixed {len(fixed_posts)} posts.") + else: + print(f"\nDRY RUN - No changes applied.") + print(f"Run with --apply to fix these {len(fixed_posts)} posts.") + + +async def main(): + apply = '--apply' in sys.argv + + if apply: + print("MODE: APPLY CHANGES") + print("This will modify posts in the database.") + response = input("Are you sure? (yes/no): ") + if response.lower() != 'yes': + print("Aborted.") + return + else: + print("MODE: DRY RUN (preview only)") + print("Add --apply flag to actually modify posts.\n") + + await fix_all_posts(apply=apply) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/post_output.json b/post_output.json new file mode 100644 index 0000000..02cd500 --- /dev/null +++ b/post_output.json @@ -0,0 +1,466 @@ +[ + { + "urn": { + "activity_urn": "7419268832684441600", + "share_urn": "7419041318460571648", + "ugcPost_urn": null + }, + "full_urn": "urn:li:activity:7419268832684441600", + "posted_at": { + "date": "2026-01-20 07:45:33", + "relative": "11 hours ago • Edited • Visible to anyone on or off LinkedIn", + "timestamp": 1768891533061 + }, + "text": "𝗞𝘂𝗻𝗱𝗲𝗻 𝘄𝗼𝗹𝗹𝗲𝗻 𝗶𝗺𝗺𝗲𝗿 ö𝗳𝘁𝗲𝗿 𝗧𝗲𝘅𝘁𝗲 𝘃𝗼𝗻 𝘂𝗻𝘀 „𝗲𝗻𝘁-𝗞𝗜-𝗶𝗳𝗶𝘇𝗶𝗲𝗿𝘁“. Hirnschmalz matters! 🤷‍♀️\n\n\nEin guter Trend finde ich.\n\n… und wir bekommen auch immer mehr Aufträge Kommunikationskonzepte von ihrer „undurchdachten Glattheit“ zu befreien.\n\nUnsere Kunden nennen das…\n\n… „re-humanisieren“:)\n\n\n\n\n\nVielleicht könnte das ein neuer Beruf werden?\n\n\n𝗞𝗜-𝗥𝗘-𝗖𝗥𝗘𝗔𝗧𝗢𝗥 … oder\n\n𝗔𝗜 𝗦𝗘𝗡𝗦𝗘𝗠𝗔𝗞𝗜𝗡𝗚 𝗗𝗜𝗥𝗘𝗖𝗧𝗢𝗥… oder\n\n𝗛𝗨𝗠𝗔𝗡 𝗥𝗘𝗪𝗥𝗜𝗧𝗘 𝗦𝗣𝗘𝗖𝗜𝗔𝗟𝗜𝗦𝗧… oder\n\n𝗔𝗜 𝗦𝗧𝗬𝗟𝗘 𝗥𝗘𝗖𝗢𝗡𝗦𝗧𝗥𝗨𝗖𝗧𝗢𝗥…\n\n\nAI Sensemaking Director mag ich besonders. Habt ihr noch andere Titel-Ideen?\n\n….\n\nKurzer privater Ausflug zu diesem Thema:\n\nVor kurzem war ich auf einer Geburtstagsparty. 
\n\nPaul, der Ehemann, meinte es besonders gut und hat seiner Frau Brigitte eine Liebeserklärungs-Rede gehalten.\n\nEindeutig KI. \nPuuuuuuh.\n\nAlle applaudierten und flüsterten (eigentlich lästerten) hinter seinem Rücken.\n\nEs war bisschen peinlich für ihn.\n\n\nMeine Meinung:\n\nGenau dasselbe passiert gerade in Unternehmen!\n\nEs wird mit Hilfe von KI Einheitsbrei produziert: \n\n- pfurzlangweilige KI-Texte (sorry)\n- lieblose Reden und LinkedIn Posts\n- nicht zu Ende gedache, individuelle Kommunikations-Konzepte\netc\n\n\n\nSo geht gute Kommunikation … \nN I C H T ❌\n\n\nUnsere feste Überzeugung:\n\nNatürlich muss man KI nutzen.\nSie spart Kosten. Aber: \n\n\nBei Make It Matter haben wir zwei Regeln:\n\n\nKI- Regel Nr. 1️⃣ \n\n𝗗𝗶𝗲 𝗠𝗲𝗻𝘀𝗰𝗵 → 𝗞𝗜 → 𝗠𝗲𝗻𝘀𝗰𝗵-𝗟𝗼𝗴𝗶𝗸 𝗯𝗹𝗲𝗶𝗯𝘁 𝘂𝗻𝗮𝗻𝘁𝗮𝘀𝘁𝗯𝗮𝗿.\n\nWir nutzen KI nie ohne „menschliche Rücklogik und Hirnschmalz“. \n\nSonst passiert das Gleiche Pauls Rede: Zwar technisch sauber, aber emotional tot, durchschaubar und banal. Vor allem aber erkennt die KI die KI:)\n\n\nKI Regel Nr. 2️⃣ \n\n𝗗𝗶𝗲 „𝟲𝟬 % 𝗠𝗲𝗻𝘀𝗰𝗵 𝘃𝘀. 𝟰𝟬 % 𝗞𝗜“-𝗟𝗼𝗴𝗶𝗸.\n\nWir glauben: Mindestens 60 % Menschliche Intelligenz plus Erfahrung. Maximal 40 % KI.\n\n\nAlles andere erzeugt denselben grauen Brei wie im Bild. \n\nUnd wir lieben es doch möglichst bunt, individuell, tailormade und ECHT, oder?\n\nIch persönlich glaube ja, dass Menschen kaufen von Menschen, die sie \"echt spüren\".\n\nDabei? \nOder nicht dabei?\n\nLiebe Grüße\nEure Christina\n\n#Kommunikation\n#PublicRelations\n#LinkedInPR\n#MakeItMatter", + "url": "https://www.linkedin.com/posts/christinahildebrandt_kommunikation-publicrelations-linkedinpr-activity-7419268832684441600-IhhQ?utm_source=social_share_send&utm_medium=member_desktop_web&rcm=ACoAAGMGPDkBwcLGZeQ7c5M2vUoBf0G76hhvo1g", + "post_type": "regular", + "author": { + "first_name": "Christina", + "last_name": "Hildebrandt", + "headline": "Co-Founder MAKE IT MATTER I Kommunikationsberatung, Interimsmanagement, PR & LinkedIn-Kompetenz für KMUs und Agenturen, die wachsen wollen", + "username": "christinahildebrandt", + "profile_url": "https://www.linkedin.com/in/christinahildebrandt?miniProfileUrn=urn%3Ali%3Afsd_profile%3AACoAABZ-U_wBolcXsKNwUaGEDfBwACvP5M5DkmQ", + "profile_picture": "https://media.licdn.com/dms/image/v2/D4D03AQFz-EjLfIatDw/profile-displayphoto-shrink_800_800/B4DZaIPVU5GwAg-/0/1746042443543?e=1770249600&v=beta&t=h61WiK5XGoOjPqfncwXZ6gtB_WvNT7HPs23MX79n3BE" + }, + "stats": { + "total_reactions": 156, + "like": 117, + "support": 11, + "love": 3, + "insight": 9, + "celebrate": 16, + "funny": 0, + "comments": 141, + "reposts": 3 + }, + "media": { + "type": "image", + "url": "https://media.licdn.com/dms/image/v2/D4D22AQGYIluS1b4Txg/feedshare-shrink_1280/B4DZvW6xAqJwAg-/0/1768837288206?e=1770249600&v=beta&t=ODTv1N94nno2U_3CMJsOkdcYcoPCHIWTO6eGRqiONYs", + "images": [ + { + "url": "https://media.licdn.com/dms/image/v2/D4D22AQGYIluS1b4Txg/feedshare-shrink_1280/B4DZvW6xAqJwAg-/0/1768837288206?e=1770249600&v=beta&t=ODTv1N94nno2U_3CMJsOkdcYcoPCHIWTO6eGRqiONYs", + "width": 1080, + "height": 1080 + } + ] + }, + "pagination_token": "dXJuOmxpOmFjdGl2aXR5OjczMjQzMTgyODc0MjM1NTM1MzYtMTc0NjI1MzU1ODk1NA==" + }, + { + "urn": { + "activity_urn": "7416373971899760640", + "share_urn": "7415336307318644736", + "ugcPost_urn": null + }, + "full_urn": "urn:li:activity:7416365884061077504", + "posted_at": { + "date": "2026-01-12 08:02:24", + "relative": "1 week ago • Edited • Visible to anyone on or off LinkedIn", + "timestamp": 1768201344466 + }, + "text": "Hamburg wir kommen!! 
\n\n \nChristina und ich haben uns diese Woche Zeit für unsere Jahresplanung genommen. \n\n2026 wird voll: Viele Termine, viele Events, viele Gespräche, auf die wir richtig Lust haben.\n\nAm meisten freuen wir uns aber auf eine Premiere für uns beide: unsere erste OMR 🤩 \n\nWarum?\nWeil wir neugierig sind. Natürlich auf den Hype und das Line Up (sagt man das bei Kongressen?) auf neue Perspektiven, gute Impulse und vor allem auf das Hamburger Netzwerk und darauf, Moritz Schubert endlich wieder live zu sehen!!! \n\n...Endlich mal raus aus der eigenen Bubble!\n\nKleiner Hinweis für alle, die noch überlegen: Aktuell gibt es noch den reduzierten \"Save Bird Preis\". Wer also ohnehin mit dem Gedanken spielt, jetzt ist ein guter Moment (Link im Kommentar).\n\nWir freuen uns auf neue Begegnungen und auf viele bekannte Gesichter.\nHamburg, wir kommen!!\n\nMake It Matter!!\n\n\nPS: Die Hotels scheinen jetzt schon recht voll - wer einen guten Tipp für uns hat - wir freuen uns über eure Empfehlungen (und natürlich auch über Treffen vor Ort!)", + "url": "https://www.linkedin.com/posts/lisa-hipp_hamburg-wir-kommen-christina-und-ich-activity-7416365884061077504-27D4?utm_source=social_share_send&utm_medium=member_desktop_web&rcm=ACoAAGMGPDkBwcLGZeQ7c5M2vUoBf0G76hhvo1g", + "post_type": "repost", + "author": { + "first_name": "LISA", + "last_name": "HIPP", + "headline": "Co-Founder of MAKE IT MATTER I Strategische Kommunikationsberatung, PR & LinkedIn-Kompetenz für KMUs und Agenturen, die wachsen wollen I Interimsmanager I Speakerin I Autorin", + "username": "lisa-hipp", + "profile_url": "https://www.linkedin.com/in/lisa-hipp?miniProfileUrn=urn%3Ali%3Afsd_profile%3AACoAABoIg7cBNIMrNROuYH0Eu8SrT_RBD19epvU", + "profile_picture": "https://media.licdn.com/dms/image/v2/D4E03AQF_O5ImniRHpA/profile-displayphoto-shrink_800_800/B4EZWVGXr8GgAg-/0/1741963230810?e=1770249600&v=beta&t=PGFdRS4hMFX9posogdbIRWixVm-4odWkunMbSm3nbgc" + }, + "stats": { + "total_reactions": 161, + "like": 131, + "support": 4, + "love": 13, + "insight": 0, + "celebrate": 13, + "funny": 0, + "comments": 38, + "reposts": 4 + }, + "media": { + "type": "image", + "url": "https://media.licdn.com/dms/image/v2/D4D22AQEe1KShPsWiQQ/feedshare-shrink_1280/B4DZuiREcqLoAs-/0/1767953945477?e=1770249600&v=beta&t=QNZGVw3XiJny2oEy3rnfZ--UxX74NLsNLmU8cJy6RRg", + "images": [ + { + "url": "https://media.licdn.com/dms/image/v2/D4D22AQEe1KShPsWiQQ/feedshare-shrink_1280/B4DZuiREcqLoAs-/0/1767953945477?e=1770249600&v=beta&t=QNZGVw3XiJny2oEy3rnfZ--UxX74NLsNLmU8cJy6RRg", + "width": 1280, + "height": 1600 + } + ] + }, + "pagination_token": "dXJuOmxpOmFjdGl2aXR5OjczMjQzMTgyODc0MjM1NTM1MzYtMTc0NjI1MzU1ODk1NA==" + }, + { + "urn": { + "activity_urn": "7415265872736415745", + "share_urn": "7414935899727433728", + "ugcPost_urn": null + }, + "full_urn": "urn:li:activity:7414935901157621761", + "posted_at": { + "date": "2026-01-09 06:39:13", + "relative": "1 week ago • Edited • Visible to anyone on or off LinkedIn", + "timestamp": 1767937153038 + }, + "text": "Engagement ist kein Nice-to-have. 
Es ist der ehrlichste Indikator für funktionierende Markenkommunikation.\n\nIm Interview spricht Max Anzile, CEO der Social Media Pirates - we are hiring und Gründer der Black Flag Agency - Performance Marketing darüber, warum organisches Engagement heute wichtiger ist als reine Reichweite, wie Algorithmen Vereinfachung und Polarisierung verstärken,\nund weshalb KI Effizienz skaliert, aber keine Qualität ersetzt.\n\nEin Gespräch über Social Media jenseits von Buzzwords – zwischen Performance-Realität, Plattformverantwortung und der Frage, wie Marken unter Algorithmus-Druck relevant bleiben.\n\nhttps://lnkd.in/dVCd9chd", + "url": "https://www.linkedin.com/posts/business-punk_engagement-ist-kein-nice-to-have-es-ist-activity-7414935901157621761-77XI?utm_source=social_share_send&utm_medium=member_desktop_web&rcm=ACoAAGMGPDkBwcLGZeQ7c5M2vUoBf0G76hhvo1g", + "post_type": "repost", + "author": { + "first_name": "Business", + "last_name": "Punk", + "headline": "192,310 followers", + "profile_url": "https://www.linkedin.com/company/business-punk/posts" + }, + "stats": { + "total_reactions": 68, + "like": 59, + "support": 3, + "love": 4, + "insight": 1, + "celebrate": 1, + "funny": 0, + "comments": 12, + "reposts": 2 + }, + "media": { + "type": "image", + "url": "https://media.licdn.com/dms/image/v2/D4D22AQFYagCD-dpw9g/feedshare-shrink_2048_1536/B4DZuck6E_LkA0-/0/1767858480734?e=1770249600&v=beta&t=i4yYETzULNCwsL5Knep7mRbmsjxce_yjnWPwIN0CHiQ", + "images": [ + { + "url": "https://media.licdn.com/dms/image/v2/D4D22AQFYagCD-dpw9g/feedshare-shrink_2048_1536/B4DZuck6E_LkA0-/0/1767858480734?e=1770249600&v=beta&t=i4yYETzULNCwsL5Knep7mRbmsjxce_yjnWPwIN0CHiQ", + "width": 1500, + "height": 855 + } + ] + }, + "pagination_token": "dXJuOmxpOmFjdGl2aXR5OjczMjQzMTgyODc0MjM1NTM1MzYtMTc0NjI1MzU1ODk1NA==" + }, + { + "urn": { + "activity_urn": "7414927614370385920", + "share_urn": "7409496092234461184", + "ugcPost_urn": null + }, + "full_urn": "urn:li:activity:7414927614370385920", + "posted_at": { + "date": "2026-01-08 08:15:05", + "relative": "1 week ago • Edited • Visible to anyone on or off LinkedIn", + "timestamp": 1767856505959 + }, + "text": "❝ 𝗞𝗜-𝗦𝘂𝗰𝗵𝗲 𝗶𝘀𝘁 𝗱𝗲𝗿 𝗲𝗿𝘀𝘁𝗲 𝗦𝗰𝗵𝗿𝗶𝘁𝘁 𝗶𝗺 𝗦𝗮𝗹𝗲𝘀 𝗙𝘂𝗻𝗻𝗲𝗹. Gute Kommunikation ist Sales-Infrastruktur. Das ist unsere Chance Christina!!! ❞\n\n\nDas sagte Max kurz vor Weihnachten in einem Café zu mir und ich schrie ihn fast an: \n\n\"Max!! Sag das nochmal!!! Das ist genial!!! Das ist unser nächster Post!\"\n\nUnd…\n\nDA IST ER:)!🚀\n\n\nWarum ich so begeistert war?\n\nIch glaube, dass Kommunikation unbedingt auch Sales „driven“ muss. \n\nMake It Matter ist entstanden, weil es nur wenige Agenturen gibt, die so radikal+konsequent Kommunikation als Sales- und Leads-Wachstumstreiber mitdenken wie wir.\n\nALLES was nicht auf die Unternehnensziele einzahlt muss radikal WEG.\n\nLisa und ich haben EIN Ziel: Die New Business- bzw. Vertriebsmannschaft soll uns lieben:)\n\n\nKI spielt uns in die Karten und ist wie ein Brennglas:\n\nDie KI entscheidet, wer und überhaupt noch empfohlen wird und wer auf Shortlists landet.\n\n\nJETZT muss man kommunikative KI-Signale setzen und das „Dach decken“. JETZT kann man KI-Pflöcke im Netz einziehen, damit man im Sales-Entscheidungsprozess VORNE sichtbar wird.\n\n𝗞𝗼𝗺𝗺𝘂𝗻𝗶𝗸𝗮𝘁𝗶𝗼𝗻 𝘁𝗿𝗲𝗶𝗯𝘁 𝗦𝗮𝗹𝗲𝘀 ü𝗯𝗲𝗿 𝗞𝗜.\nJetzt und künftig noch mehr!\n\n\n\nIm Gespräch mit Max wurde das sehr klar: KI recherchiert nicht mehr wie früher, sie priorisiert und bewertet, lange bevor der Vertrieb überhaupt spricht. 
\n\nEs gilt:\n\n👉Wenn KI ein Unternehmen nicht eindeutig einordnen kann, ist es für Sales nicht im Rennen.\n\n👉Genau hier wird Kommunikation zur Sales-Infrastruktur.\n\n\nMake It Matter Matter schafft dafür die strategische Grundlage aus Positionierung, Themenarchitektur und Public Relations als echte Third-Party-Validierung. \n\n(Klingt gut oder?! Und ist wahr!!)\n\nUnsre Stärken?\n\n👉LISA HIPP Hipp übersetzt diese Klarheit auf LinkedIn in wiederholbare Narrative, die Einordnung erzeugen. \n\n👉Max Anzile sorgt dafür, dass diese Signale gezielt distribuiert, getestet und messbar gemacht werden.\n\n👉 Und ich bin Kommunikation und Public Relations durch und durch (Konzern, Mittelstand, Agenturen. 25 Jahre\n\n\n\nSo entsteht für undere kein Content-Feuerwerk, sondern ein SYSTEM, das SALES-VORAUSWAHL gewinnt.\n\n💸💸💸\n\n\nWir sehen in der Praxis:\n\nDeals mit mehreren Kommunikationskontakten \n\n👉schliessen schneller und stabile\n👉und brauchen weniger Rabatt \n\nweil Vertrauen steigt und Vergleichbarkeit sinkt.\n\nUnsere Überzeugung ist klar: \n\nEntweder Kommunikation ist messbarer Teil des Sales Funnels oder sie wird 2026 gestrichen.\n\nSorry to say!!\nGood for us!!!!🙃\n\nChristina\n\n#Kommunikation\n#SalesSupport\n#SocialMedia\n#KISEO", + "url": "https://www.linkedin.com/posts/christinahildebrandt_kommunikation-salessupport-socialmedia-activity-7414927614370385920-nQPN?utm_source=social_share_send&utm_medium=member_desktop_web&rcm=ACoAAGMGPDkBwcLGZeQ7c5M2vUoBf0G76hhvo1g", + "post_type": "regular", + "author": { + "first_name": "Christina", + "last_name": "Hildebrandt", + "headline": "Co-Founder MAKE IT MATTER I Kommunikationsberatung, Interimsmanagement, PR & LinkedIn-Kompetenz für KMUs und Agenturen, die wachsen wollen", + "username": "christinahildebrandt", + "profile_url": "https://www.linkedin.com/in/christinahildebrandt?miniProfileUrn=urn%3Ali%3Afsd_profile%3AACoAABZ-U_wBolcXsKNwUaGEDfBwACvP5M5DkmQ", + "profile_picture": "https://media.licdn.com/dms/image/v2/D4D03AQFz-EjLfIatDw/profile-displayphoto-shrink_800_800/B4DZaIPVU5GwAg-/0/1746042443543?e=1770249600&v=beta&t=h61WiK5XGoOjPqfncwXZ6gtB_WvNT7HPs23MX79n3BE" + }, + "stats": { + "total_reactions": 74, + "like": 59, + "support": 0, + "love": 6, + "insight": 6, + "celebrate": 3, + "funny": 0, + "comments": 32, + "reposts": 2 + }, + "media": { + "type": "image", + "url": "https://media.licdn.com/dms/image/v2/D5622AQEoxrN--83wDQ/feedshare-shrink_1280/B56ZtPRbJuJcAs-/0/1766561528601?e=1770249600&v=beta&t=drgs7rI2GEOGWUoeRwZsJNWQwUsPgs2ScPY0IH4OT1o", + "images": [ + { + "url": "https://media.licdn.com/dms/image/v2/D5622AQEoxrN--83wDQ/feedshare-shrink_1280/B56ZtPRbJuJcAs-/0/1766561528601?e=1770249600&v=beta&t=drgs7rI2GEOGWUoeRwZsJNWQwUsPgs2ScPY0IH4OT1o", + "width": 1252, + "height": 1680 + } + ] + }, + "pagination_token": "dXJuOmxpOmFjdGl2aXR5OjczMjQzMTgyODc0MjM1NTM1MzYtMTc0NjI1MzU1ODk1NA==" + }, + { + "urn": { + "activity_urn": "7413495163643121665", + "share_urn": "7413495162020028416", + "ugcPost_urn": null + }, + "full_urn": "urn:li:activity:7413495163643121665", + "posted_at": { + "date": "2026-01-04 09:23:03", + "relative": "2 weeks ago • Edited • Visible to anyone on or off LinkedIn", + "timestamp": 1767514983092 + }, + "text": "Ein unstrategischer Flashback-Post aus dem Flixbus Richtung Bodensee.\n#AboutEducatedAwareness\n#AboutMittelstand\n\n#AusDerHüfteGeschossen\nSorry Lisa🙈\n\n⸻\n\nGestern habe ich brav den Keller aufgeräumt und bin an einer Kiste alter Job-Fotos hängengeblieben.\n📸\n\nIch in Amsterdam mit Tommy Hilfiger. 
Beim Abendessen mit Lovely Annette Weber, Flohmärkte mit Chefredakteuren, Bilder mit Steffi Graf, Sönke Wortmann, Natalia Wörner und Heike Makatsch in Nördlingen bei Strenesse. Cartier-Juste-Un-Clou-Launch in New York.\n\nHalleluja war ich wichtig, dünn, lustig und jung :)\n\nWas für eine goldene, sorglose Zeit*.\n💫💫💫\n\nBei diesem Foto👇 musste ich so lachen. Ich weiß noch, wie es entstanden ist:\n\nWir waren Sponsor beim Bambi. (Wir – damit meine ich Cartier damals.)\n\nMein Chef Tom Meggle gab mir damals die Erlaubnis, meine Freundinnen Bine Käfer (jetzt Lanz), Celia von Bismarck und Gioia von Thun mitzunehmen.\n\nWichtig wichtig.\n\nWir also aufgedresst wie Bolle, kommen an den roten Teppich, Blitzgewitter, 4 Girlies. Dann ein Rufen aus der Fotografenmenge:\n\n„Christina!!! Kannst Du aus dem Bild gehen??? Wir brauchen die 3 Mädels alleine!!!“ (weil echt wichtig).\n\nIch weiß noch, wie ich fast zusammengebrochen bin vor Lachen. „Hey!! ICH habe DIE mitgenommen!!“ 🤣🤣🤣\n\nEin Bild von uns 4 hab ich dann aber doch noch bekommen. Plus einen wundervollen Abend.\n\n….\n\nWas das mit Make It Matter äund „Job“ zu tun hat?\n\nWenig. Aber ein bisschen schon:\n\n\nEs zeigt aber, wie sehr ich mich verändert habe:\n\n💫𝗛𝗲𝘂𝘁𝗲 𝗮𝗿𝗯𝗲𝗶𝘁𝗲 𝗶𝗰𝗵 𝗮𝗺 𝗹𝗶𝗲𝗯𝘀𝘁𝗲𝗻 𝗳𝘂̈𝗿 𝗨𝗻𝘁𝗲𝗿𝗻𝗲𝗵𝗺𝗲𝗻, 𝗱𝗶𝗲 𝗴𝗿𝗼ß𝗮𝗿𝘁𝗶𝗴 𝘀𝗶𝗻𝗱, 𝗮𝗯𝗲𝗿 𝘇𝘂 𝗹𝗲𝗶𝘀𝗲, 𝘇𝘂 𝗸𝗼𝗺𝗽𝗹𝗲𝘅 𝗼𝗱𝗲𝗿 𝘇𝘂 𝗯𝗲𝘀𝗰𝗵𝗲𝗶𝗱𝗲𝗻, 𝘂𝗺 𝘀𝗲𝗹𝗯𝘀𝘁 𝘂̈𝗯𝗲𝗿 𝗶𝗵𝗿𝗲 𝗦𝘁𝗮̈𝗿𝗸𝗲 𝘇𝘂 𝘀𝗽𝗿𝗲𝗰𝗵𝗲𝗻\n\n(Es gibt so so so tolle Firmen, von denen noch keiner etwas gehört hat, meistens mit unglaublich netten Teams!)\n\n\n💫 Heute habe ich mich in erklärungsbedürftige, komplizierte Produkte, EducatedAwareness und #LinkedIn als Teil der klassischen #Unternehmenskommunikation verliebt.\n\n💫 Heute liebe ich komplexe und „unsexy“ Aufgaben, die ich knacken will.\n\nSo verändert man sich.\nIst das nicht verrückt?\n\nSchön war’s trotzdem damals.\nMensch, bin ich dankbar!\n\nEuch einen schönen Sonntag\nEure Christina\n\nwww.make-it-matter.de\n\n#MakeItMatter\n#Kommunikation\n#Wachstumstreiber\n#Mittelstand\n#PublicRelations\n\nPS. Ich setzt mich jetzt gleich an das Papier liebe Lisa. 
Bis 11.15h gab ich fertig😉", + "url": "https://www.linkedin.com/posts/christinahildebrandt_abouteducatedawareness-aboutmittelstand-ausderhaesftegeschossen-activity-7413495163643121665-ghTT?utm_source=social_share_send&utm_medium=member_desktop_web&rcm=ACoAAGMGPDkBwcLGZeQ7c5M2vUoBf0G76hhvo1g", + "post_type": "regular", + "author": { + "first_name": "Christina", + "last_name": "Hildebrandt", + "headline": "Co-Founder MAKE IT MATTER I Kommunikationsberatung, Interimsmanagement, PR & LinkedIn-Kompetenz für KMUs und Agenturen, die wachsen wollen", + "username": "christinahildebrandt", + "profile_url": "https://www.linkedin.com/in/christinahildebrandt?miniProfileUrn=urn%3Ali%3Afsd_profile%3AACoAABZ-U_wBolcXsKNwUaGEDfBwACvP5M5DkmQ", + "profile_picture": "https://media.licdn.com/dms/image/v2/D4D03AQFz-EjLfIatDw/profile-displayphoto-shrink_800_800/B4DZaIPVU5GwAg-/0/1746042443543?e=1770249600&v=beta&t=h61WiK5XGoOjPqfncwXZ6gtB_WvNT7HPs23MX79n3BE" + }, + "stats": { + "total_reactions": 256, + "like": 203, + "support": 2, + "love": 33, + "insight": 3, + "celebrate": 6, + "funny": 9, + "comments": 35, + "reposts": 1 + }, + "media": { + "type": "image", + "url": "https://media.licdn.com/dms/image/v2/D4D22AQFSwyP3pyzGvA/feedshare-shrink_1280/B4DZuIGiQEI8As-/0/1767514980532?e=1770249600&v=beta&t=PcOxfCx7DnRcny6dAO2ZIzX8CYIL5hrmr6-CxUq0hFE", + "images": [ + { + "url": "https://media.licdn.com/dms/image/v2/D4D22AQFSwyP3pyzGvA/feedshare-shrink_1280/B4DZuIGiQEI8As-/0/1767514980532?e=1770249600&v=beta&t=PcOxfCx7DnRcny6dAO2ZIzX8CYIL5hrmr6-CxUq0hFE", + "width": 1280, + "height": 1707 + } + ] + }, + "pagination_token": "dXJuOmxpOmFjdGl2aXR5OjczMjQzMTgyODc0MjM1NTM1MzYtMTc0NjI1MzU1ODk1NA==" + }, + { + "urn": { + "activity_urn": "7413276570640875520", + "share_urn": "7404415477738983424", + "ugcPost_urn": null + }, + "full_urn": "urn:li:activity:7404415479379062784", + "posted_at": { + "date": "2026-01-03 18:54:26", + "relative": "1 month ago • Edited • Visible to anyone on or off LinkedIn", + "timestamp": 1767462866459 + }, + "text": "W&V ernennt LinkedIn als Schlüsselplattform für CMOs \n\n\nDas Thema LinkedIn wird in vielen Unternehmen nach wie vor nur mit der Kneifzange angefasst.\n\nZu komplex, zu unklar, zu viel Aufwand....\n\nDabei liegt genau hier ein entscheidender Hebel für Sichtbarkeit, Vertrauen und letztlich Geschäftserfolg. Rolf Schröter, Chefredakteur der W&V, Werben & Verkaufen hat sich dem Thema angenommen und in einem aktuellen Beitrag herausgearbeitet, wie CMOs LinkedIn strategisch nutzen können (Link im Kommentar).\n\nDie Quintessenz des Artikels: \n➔ LinkedIn ist kein Add-on im Marketingmix, sondern strategisches Kernelement\n\nDass Christina und ich und unsere Firma Make It Matter bereits zum zweiten Mal unsere Sicht zu diesem Thema ergänzen dürfen, freut uns unglaublich! Nicht nur aus kommunikativer Sicht, sondern vor allem auch, weil es zeigt wie relevant dieses Thema aktuell ist und wie viele Firmen es umtreibt. \n\n\nViele Unternehmen stehen aktuell vor denselben Herausforderungen:\n- Wie schaffe ich es von meinen Kund:innen gehört zu werden?\n- Wie kann ich Sichtbarkeit gezielt auf Geschäftsziele einzahlen lassen, ob Recruiting, Markenaufbau oder Vertrieb?\n- Wie gelingt es, dass Kommunikation nicht im Tagesgeschäft versandet, sondern systematisch aufgebaut wird?\n\n\nGenau darauf haben wir uns spezialisiert:\nWir entwickeln Kommunikationsstrategien, die LinkedIn nicht als Endpunkt, sondern als Werkzeug verstehen! 
Eingebettet in ein funktionierendes System aus Positionierung, PR und vor allem funktionierenden Systemen.\n\nDas Ergebnis:\n➔ Klarere Positionierung im Markt\n➔ Sichtbarkeit bei den relevanten Zielgruppen\n➔ Entlastung für interne Teams durch systematische Prozesse\n➔ Mehr Vertrauen, bessere Anfragen, kürzere Entscheidungswege\n\n\nDass die W&V dieses Thema erneut aufgreift, zeigt: Es tut sich etwas. \n\nKommunikation wird (wieder) zum strategischen Thema. Auch und gerade auf Plattformen wie LinkedIn.\n\n\nWir freuen uns, Teil dieser Diskussion zu sein. Denn wer Kommunikation zum Wachstumstreiber machen möchte, braucht mehr als netten Content.\n\n\nTausend Dank lieber Rolf Schröter, dass wir einen Teil zu dieser wichtigen Diskussion beitragen dürfen! 🙏 🙏 🙏 \n\nPS: Wer es noch nicht hat - am besten gleich ein Abo abschließen! Lohnt sich allein für die Kolumne \"Rolf räumt auf\" schon - mein wöchentliches must read!", + "url": "https://www.linkedin.com/posts/lisa-hipp_wv-ernennt-linkedin-als-schl%C3%BCsselplattform-activity-7404415479379062784-oPfx?utm_source=social_share_send&utm_medium=member_desktop_web&rcm=ACoAAGMGPDkBwcLGZeQ7c5M2vUoBf0G76hhvo1g", + "post_type": "repost", + "author": { + "first_name": "LISA", + "last_name": "HIPP", + "headline": "Co-Founder of MAKE IT MATTER I Strategische Kommunikationsberatung, PR & LinkedIn-Kompetenz für KMUs und Agenturen, die wachsen wollen I Interimsmanager I Speakerin I Autorin", + "username": "lisa-hipp", + "profile_url": "https://www.linkedin.com/in/lisa-hipp?miniProfileUrn=urn%3Ali%3Afsd_profile%3AACoAABoIg7cBNIMrNROuYH0Eu8SrT_RBD19epvU", + "profile_picture": "https://media.licdn.com/dms/image/v2/D4E03AQF_O5ImniRHpA/profile-displayphoto-shrink_800_800/B4EZWVGXr8GgAg-/0/1741963230810?e=1770249600&v=beta&t=PGFdRS4hMFX9posogdbIRWixVm-4odWkunMbSm3nbgc" + }, + "stats": { + "total_reactions": 181, + "like": 126, + "support": 3, + "love": 10, + "insight": 4, + "celebrate": 38, + "funny": 0, + "comments": 66, + "reposts": 5 + }, + "media": { + "type": "image", + "url": "https://media.licdn.com/dms/image/v2/D4D22AQGYvef95dFTnw/feedshare-shrink_2048_1536/B4DZsHEo9mJ8Aw-/0/1765350216712?e=1770249600&v=beta&t=6YGJTfe6owLABZt8qDSL6isgFEjupW5orIhO23fVy30", + "images": [ + { + "url": "https://media.licdn.com/dms/image/v2/D4D22AQGYvef95dFTnw/feedshare-shrink_2048_1536/B4DZsHEo9mJ8Aw-/0/1765350216712?e=1770249600&v=beta&t=6YGJTfe6owLABZt8qDSL6isgFEjupW5orIhO23fVy30", + "width": 1080, + "height": 1350 + } + ] + }, + "pagination_token": "dXJuOmxpOmFjdGl2aXR5OjczMjQzMTgyODc0MjM1NTM1MzYtMTc0NjI1MzU1ODk1NA==" + }, + { + "urn": { + "activity_urn": "7412408614184960000", + "share_urn": "7412408612888920064", + "ugcPost_urn": null + }, + "full_urn": "urn:li:activity:7412408614184960000", + "posted_at": { + "date": "2026-01-01 09:25:29", + "relative": "2 weeks ago • Edited • Visible to anyone on or off LinkedIn", + "timestamp": 1767255929514 + }, + "text": "𝗪𝗘𝗥 𝗠𝗔𝗖𝗛𝗧 𝗠𝗜𝗧??? 
Ich habe entschieden, dass 2026 meine zweite Pubertät beginnt\n#MakeItMatter🚀\n\nHalleluja!\nEndlich hat das neue Jahr begonnen.\n\nMit meinen Freunden habe ich gestern beschlossen, dass ich das kommende Jahr jetzt mal völlig neu angehen werde:\n\nIch stelle mir einfach vor, ich wäre in meiner zweiten Pubertät!!!\n\nOk, ok, dieses Mal mit besserem Wein, einem kleinen Kontopuffer, 25 Jahren Berufserfahrung, besserem WLAN, aber mit dem gleichen Gefühl von damals:\n\n𝗗𝗘𝗥 𝗡𝗔̈𝗖𝗛𝗦𝗧𝗘 𝗟𝗘𝗕𝗘𝗡𝗦𝗔𝗕𝗦𝗖𝗛𝗡𝗜𝗧𝗧 𝗪𝗜𝗥𝗗 𝗗𝗘𝗥 𝗕𝗘𝗦𝗧𝗘!!!\n\nIch fühle es!\n\nAlles liegt vor mir und ich kann Pippi-Langstrumpf-mäßig einfach alles erreichen, à la:\n\n„𝘐𝘤𝘩 𝘩𝘢𝘣 𝘥𝘢𝘴 𝘯𝘰𝘤𝘩 𝘯𝘪𝘦 𝘨𝘦𝘮𝘢𝘤𝘩𝘵 – 𝘥𝘢𝘴 𝘬𝘢𝘯𝘯 𝘪𝘤𝘩 𝘣𝘦𝘴𝘵𝘪𝘮𝘮𝘵.“\n\n(PS: Wann haben wir das eigentlich verlernt?)\n\nIch finde die Ähnlichkeit zu meinem (nicht mehr pubertierenden) Sohn Lenny wirklich erstaunlich:\n\nEr ist genauso begeistert von der Idee, im Ausland zu studieren und sich etwas Eigenes, Großes aufzubauen, wie ich besessen von Lisas und meiner Make-It-Matter-Idee bin.\n\nJetzt sitzen wir auf dem Driverseat unseres Lebens und können Kommunikations-Burgen aufbauen: das, was wir am allerbesten können!\n\nWir mussten gestern bei dem Erste-und-Zweite-Pubertäts-Vergleich wirklich lachen:\n\nAuch die Hormonprobleme sind ähnlich. Nur habe ich meine im Griff bzw. hinter mir.\nLenny hat noch eine (aufregende) Reise vor sich.\n\nMuss man da nicht automatisch grinsen?\n\nDieses Grinsen werde ich mir für 2026 vornehmen. Ich werde mich öfter an den Sternenzauber meiner ersten Pubertät erinnern, an die unaufhaltsame Kraft, die Fröhlichkeit, den Mut, die Neugierde und die Ausdauer.\n\nUnd wisst ihr, worauf ich mich am meisten freue?\n\nAuf die Momente, in denen ich bei den Social Media Pirates - we are hiring – we are hiring – ins Büro komme und Max Anzile (der übrigens 20 Jahre jünger ist als ich) zu mir sagt:\n\n„Guten Morgen, Boomer!“\n…und frech grinst. Auf seine KI-Sessions, seine Ideen, neue Welten, Hummeln im Popo.\n\nUnd natürlich LISA HIPP, die allerallerallerbeste Geschäftspartnerin, die ich mir vorstellen kann.\n\n#ZamReinZamRaus\n\nSeid ihr dabei? 💫\nZweite Pubertät ab 2026? 💪\nVollgas? ✔️\nLebensfreude? 🫶\nKommunikation neu denken? ✔️\nJahr des Feuerpferdes? 
🐎\n\nDann GALOPP!!!!!\n\nIch wünsche euch, dass all eure Träume und Wünsche in Erfüllung gehen und dass ihr den Mut habt, etwas dafür zu tun!\n\nIch habe die Tage dazu etwas Tolles gelesen:\n\n„Wenn die Sehnsucht größer ist als die Angst, wird Mut, Erfolg und Lebensfreude geboren.“\n\nIst das nicht schön?\n\nHappy New Year und happy neue Lebensphase(n)\n\nwünscht Euch\nChristina\n\n#MakeItMatter\n#Kommunikation\n#PublicRelations\n#LinkedInComms", + "url": "https://www.linkedin.com/posts/christinahildebrandt_makeitmatter-zamreinzamraus-makeitmatter-activity-7412408614184960000-QsHK?utm_source=social_share_send&utm_medium=member_desktop_web&rcm=ACoAAGMGPDkBwcLGZeQ7c5M2vUoBf0G76hhvo1g", + "post_type": "regular", + "author": { + "first_name": "Christina", + "last_name": "Hildebrandt", + "headline": "Co-Founder MAKE IT MATTER I Kommunikationsberatung, Interimsmanagement, PR & LinkedIn-Kompetenz für KMUs und Agenturen, die wachsen wollen", + "username": "christinahildebrandt", + "profile_url": "https://www.linkedin.com/in/christinahildebrandt?miniProfileUrn=urn%3Ali%3Afsd_profile%3AACoAABZ-U_wBolcXsKNwUaGEDfBwACvP5M5DkmQ", + "profile_picture": "https://media.licdn.com/dms/image/v2/D4D03AQFz-EjLfIatDw/profile-displayphoto-shrink_800_800/B4DZaIPVU5GwAg-/0/1746042443543?e=1770249600&v=beta&t=h61WiK5XGoOjPqfncwXZ6gtB_WvNT7HPs23MX79n3BE" + }, + "stats": { + "total_reactions": 188, + "like": 139, + "support": 2, + "love": 32, + "insight": 1, + "celebrate": 11, + "funny": 3, + "comments": 84, + "reposts": 1 + }, + "media": { + "type": "image", + "url": "https://media.licdn.com/dms/image/v2/D4D22AQEGzsh77ciLGg/feedshare-shrink_1280/B4DZt4qVzPKoAs-/0/1767255927836?e=1770249600&v=beta&t=_QpVfCET4jWABpFB-eW45a_tCJzEcMPorza6cfjlFrU", + "images": [ + { + "url": "https://media.licdn.com/dms/image/v2/D4D22AQEGzsh77ciLGg/feedshare-shrink_1280/B4DZt4qVzPKoAs-/0/1767255927836?e=1770249600&v=beta&t=_QpVfCET4jWABpFB-eW45a_tCJzEcMPorza6cfjlFrU", + "width": 1263, + "height": 1558 + } + ] + }, + "pagination_token": "dXJuOmxpOmFjdGl2aXR5OjczMjQzMTgyODc0MjM1NTM1MzYtMTc0NjI1MzU1ODk1NA==" + }, + { + "urn": { + "activity_urn": "7411645447343308802", + "share_urn": "7401690967386501120", + "ugcPost_urn": null + }, + "full_urn": "urn:li:activity:7401866535658606592", + "posted_at": { + "date": "2025-12-30 06:52:56", + "relative": "1 month ago • Visible to anyone on or off LinkedIn", + "timestamp": 1767073976360 + }, + "text": "AKT 3 – UNSER ANGEBOT FÜR DICH!\n\n\nFalls du Akt 1 und 2 von Christina und LISA gelesen hast, kennst du unser \"Warum\" schon.\n\nHeute geht es um die Frage, die dich vermutlich am meisten interessiert:\n\n\nWAS DU VON UNS BEKOMMST:\n\nKommunikation beginnt bei der Unternehmensstrategie und endet nicht beim Posting. Sie soll Klarheit schaffen, Vertrauen aufbauen und vor allem: WIRKEN!\nGenau darauf zahlen alle unsere Leistungen ein!\n\n\nUNSER ANGEBOT:\n\n1. Kommunikationsberatung\nWir entwickeln Unternehmens- und Markenkommunikation, Positionierung, Personal Branding, Social Media- und PR-Setups, die unmittelbar auf deine Unternehmensziele einzahlen. Alles andere ist Ressourcenverschwendung!\n\n2. Public Relations\nWir platzieren dich und dein Unternehmen in den wichtigsten Wirtschafts-, Fach- und Leitmedien.\n\n3. Trainings für Teams\nWir machen Teams zu Multiplikatoren. Storytelling, Content-Strategien, Social Media Enablement, PR x Plattformlogik und KI im Alltag. Praxisnah und verständlich, damit unser Wissen auch dann noch Wirkung hat, wenn wir schon längst nicht mehr an Bord sind.\n\n4. 
LinkedIn\nWir machen LinkedIn zum Business-Tool: für Sichtbarkeit, Recruiting, Leadgenerierung und Thought Leadership. Von Plattformstrategie über Corporate-Influencer-Programme bis zu Full Service inklusive Ghostwriting.\n\n5. Keynotes\nWir bringen unser Wissen auch auf die Bühne: mit Keynotes zu strategischer Kommunikation, Positionierung, Personal Branding, LinkedIn und der Zukunft von Business-Kommunikation.\n\n6. Interims-Management\nWenn der Druck steigt, springen wir ein: als erfahrene Sparringspartnerinnen auf C-Level für Strategie, Kommunikation, PR und Marke in bewegten Zeiten.\n\n\nWIE WIR ARBEITEN:\n\nHinter uns steht ein kuratiertes Netzwerk aus über 70 Expert:innen: von Text bis TikTok, von Snapchat über Employer Branding bis Meta Ads.\n\nDas heißt für dich:\n– ein Ansprechpartner\n– keine Overhead-Kosten\n– kurze Wege\n– Genau die Spezialist:innen, die DU gerade brauchst, und nicht die, die gerade ausgelastet werden müssen\n\nOder anders gesagt: höchstes Beratungsniveau plus hochwertige Umsetzungs-Werkbank, ohne den typische Wasserkopf.\n\n\nWAS DICH AUF DIESEM ACCOUNT ERWARTET:\n\nWenn du uns hier folgst, bekommst du in den nächsten Wochen unter anderem:\n➔ Einblicke, wie wir Kommunikationssysteme aufbauen, die in Budgetrunden bestehen\n➔ Beispiele, wie LinkedIn, PR und interne Kommunikation sinnvoll verzahnt werden\n➔ Perspektiven, wie KI dir in der Kommunikation wirklich hilft und wo sie nichts zu suchen hat\n➔ Fehler, die wir in 40+ Jahren Kommunikation immer wieder sehen und wie du sie vermeidest\n➔ Tools, Fragen und Denkmodelle, die du direkt in deinem Alltag umsetzen kannst\n\n\n🎁 UNSER GOODIE 🎁\n\nVor Weihnachten haben wir noch eine Überraschung für dich:\nAuf diesem Account verlosen wir ein exklusives Format mit uns, etwas, das es so nicht als Produkt auf der Website gibt.\n\nFür Menschen und Marken, die mehr wollen als Sichtbarkeit.\nMAKE IT MATTER!", + "url": "https://www.linkedin.com/posts/makeitmatter-communication_akt-3-unser-angebot-f%C3%BCr-dich-falls-du-activity-7401866535658606592-LsdM?utm_source=social_share_send&utm_medium=member_desktop_web&rcm=ACoAAGMGPDkBwcLGZeQ7c5M2vUoBf0G76hhvo1g", + "post_type": "repost", + "author": { + "first_name": "Make", + "last_name": "It Matter", + "headline": "461 followers", + "profile_url": "https://www.linkedin.com/company/makeitmatter-communication/posts" + }, + "stats": { + "total_reactions": 173, + "like": 131, + "support": 3, + "love": 15, + "insight": 2, + "celebrate": 22, + "funny": 0, + "comments": 72, + "reposts": 4 + }, + "media": { + "type": "image", + "url": "https://media.licdn.com/dms/image/v2/D4E22AQEpSbAgeucWhg/feedshare-shrink_1280/B4EZrgWsBoKcAw-/0/1764700642073?e=1770249600&v=beta&t=GGSjT_vmz5CzAAPx--AyFkRcuVHNV9S7v87OrVDS2cY", + "images": [ + { + "url": "https://media.licdn.com/dms/image/v2/D4E22AQEpSbAgeucWhg/feedshare-shrink_1280/B4EZrgWsBoKcAw-/0/1764700642073?e=1770249600&v=beta&t=GGSjT_vmz5CzAAPx--AyFkRcuVHNV9S7v87OrVDS2cY", + "width": 1280, + "height": 1599 + } + ] + }, + "pagination_token": "dXJuOmxpOmFjdGl2aXR5OjczMjQzMTgyODc0MjM1NTM1MzYtMTc0NjI1MzU1ODk1NA==" + }, + { + "urn": { + "activity_urn": "7411644491889246208", + "share_urn": "7406801427307589632", + "ugcPost_urn": null + }, + "full_urn": "urn:li:activity:7407309899174486016", + "posted_at": { + "date": "2025-12-30 06:49:08", + "relative": "1 month ago • Edited • Visible to anyone on or off LinkedIn", + "timestamp": 1767073748562 + }, + "text": "GEWINNE EIN EXKLUSIVES STRATEGIE-SPARRING\n\nDu hast ein Kommunikations-Thema, bei dem du 
feststeckst? Dir fehlt die Idee, wie Du deine Marketing Botschaften strategisch mit deinen Unternehmenszielen verlinkst? Dein Social Media Team kommt immer wieder mit Vorschlägen wie dem Welt-Strumpfhosen-Tag?\n\nSprich, dir fehlt Klarheit in der Kommunikation deiner Marke, auf LinkedIn, in deiner Positionierung oder im Bereich PR?\n\nDann kommt hier DEIN Weihnachtsgeschenk:\n\nWir verlosen 1x gratis Strategie-Booster-Call:\n • 60 Minuten Sparring mit Christina Hildebrandt und LISA HIPP, den Gründerinnen von Make It Matter\n • Fokus: Kommunikationsstrategie, Kommunikation als Wachstumstreiber, LinkedIn, Thought Leadership, PR, etc.\n \nWas das Format besonders macht:\n – Es ist nicht buchbar 🤫 \n – Wir gehen 60 Minuten mit vollem Fokus auf deine individuellen Needs ein (Deep dive und Tabula Rasa)\n – Direkte Ideen und konkrete Lösungen\n\nLUST? \n\nSo kannst Du teilnehmen:\n • Folge unserem Account Make It Matter\n • Melde dich zu unserem Newsletter an\n • Teilnahmeschluss: 24.12 🎄 \n\n\n✨ ✨ ✨ ✨ ✨ ✨ \n\nUm Teilzunehmen einmal auf unsere Website nach unten scrollen und im Newsletter eintragen\nhttps://make-it-matter.de\n\n✨ ✨ ✨ ✨ ✨ ✨ \n\n\nWir drücken die Daumen!", + "url": "https://www.linkedin.com/posts/makeitmatter-communication_gewinne-ein-exklusives-strategie-sparring-activity-7407309899174486016-em8s?utm_source=social_share_send&utm_medium=member_desktop_web&rcm=ACoAAGMGPDkBwcLGZeQ7c5M2vUoBf0G76hhvo1g", + "post_type": "repost", + "author": { + "first_name": "Make", + "last_name": "It Matter", + "headline": "461 followers", + "profile_url": "https://www.linkedin.com/company/makeitmatter-communication/posts" + }, + "stats": { + "total_reactions": 142, + "like": 113, + "support": 2, + "love": 12, + "insight": 1, + "celebrate": 14, + "funny": 0, + "comments": 153, + "reposts": 6 + }, + "media": { + "type": "image", + "url": "https://media.licdn.com/dms/image/v2/D4E22AQGleDz_WX7P0g/feedshare-shrink_1280/B4EZso.ojCHoAs-/0/1765919070643?e=1770249600&v=beta&t=8sPMf2fu4D4XmNbtx6zTWQ28Havbe1lb6U8tYlbELNs", + "images": [ + { + "url": "https://media.licdn.com/dms/image/v2/D4E22AQGleDz_WX7P0g/feedshare-shrink_1280/B4EZso.ojCHoAs-/0/1765919070643?e=1770249600&v=beta&t=8sPMf2fu4D4XmNbtx6zTWQ28Havbe1lb6U8tYlbELNs", + "width": 1280, + "height": 1600 + } + ] + }, + "pagination_token": "dXJuOmxpOmFjdGl2aXR5OjczMjQzMTgyODc0MjM1NTM1MzYtMTc0NjI1MzU1ODk1NA==" + }, + { + "urn": { + "activity_urn": "7409072824025534464", + "share_urn": "7398196790556237825", + "ugcPost_urn": null + }, + "full_urn": "urn:li:activity:7409072824025534464", + "posted_at": { + "date": "2025-12-23 04:30:15", + "relative": "0 months ago • Edited • Visible to anyone on or off LinkedIn", + "timestamp": 1766460615164 + }, + "text": "#agediversity #WildeHilde\nF R Ü H E R: Dauerfeuer, Missionseifer, Dauerdruck, Empörung, Verzweiflung, unendliche Diskussionen, ständig „noch schnell eins oben drauf“. Perfektionismus als Pflichtprogramm. Ungeduld, \nVollgas, All-In… immer, überall. \n\nUnd dieses eine, alte Mantra:\n„Wenn ich’s nicht rette … wer dann 🤷‍♀️?“\n\n\nH E U T E: Erfahrung, Prioritäten, Netzwerk, KI-Erleichterung, Abkürzungen, 110 % bewusst eingesetzt, Herzblut mit Schutzfilter, \nKlarheit, Ja zum Nein, Beobchten & Schweigen, Qualität, Ruhe & Begeisterung, Neugierde\nAnpassungsfähigkeit. Und: \nHUMOR, VERTRAUEN, INTUITION\n \nUnd die klare Erkenntnis:\n\n„Ich rette heute nichts mehr und kämpfe nichts mehr durch gegen mühsamen Widerstand. 🤷‍♀️“\n\nÄlter werden? \nWunderbar!\n\nBitte mehr davon!!!\nChristina\n\nPS. 
Danke 2025: Das erste Mal Angst-entfesselte Firmen-Gründerin mit meiner Geschäftspartnerin LISA HIPP und unsere Partner Max Anzile (1+1+1=10)🤷‍♀️\n\nDanke 2024\nDanke www.make-it-matter.de\nDanke #Wunderlisa🤍\nDanke Max Anzile … ich bin soooo gerne Dein Boomer!!! So so gerne🫶", + "url": "https://www.linkedin.com/posts/christinahildebrandt_agediversity-wildehilde-wunderlisa-activity-7409072824025534464-uYb9?utm_source=social_share_send&utm_medium=member_desktop_web&rcm=ACoAAGMGPDkBwcLGZeQ7c5M2vUoBf0G76hhvo1g", + "post_type": "regular", + "author": { + "first_name": "Christina", + "last_name": "Hildebrandt", + "headline": "Co-Founder MAKE IT MATTER I Kommunikationsberatung, Interimsmanagement, PR & LinkedIn-Kompetenz für KMUs und Agenturen, die wachsen wollen", + "username": "christinahildebrandt", + "profile_url": "https://www.linkedin.com/in/christinahildebrandt?miniProfileUrn=urn%3Ali%3Afsd_profile%3AACoAABZ-U_wBolcXsKNwUaGEDfBwACvP5M5DkmQ", + "profile_picture": "https://media.licdn.com/dms/image/v2/D4D03AQFz-EjLfIatDw/profile-displayphoto-shrink_800_800/B4DZaIPVU5GwAg-/0/1746042443543?e=1770249600&v=beta&t=h61WiK5XGoOjPqfncwXZ6gtB_WvNT7HPs23MX79n3BE" + }, + "stats": { + "total_reactions": 268, + "like": 188, + "support": 29, + "love": 13, + "insight": 3, + "celebrate": 25, + "funny": 10, + "comments": 81, + "reposts": 3 + }, + "media": { + "type": "image", + "url": "https://media.licdn.com/dms/image/v2/D4D22AQFNSuPF7v-ciA/feedshare-shrink_2048_1536/B4DZqusxbDJMAw-/0/1763867566273?e=1770249600&v=beta&t=5h5VmVC_at1s5E0-35p7G7y-YsblnmFt36y2D_MRVtw", + "images": [ + { + "url": "https://media.licdn.com/dms/image/v2/D4D22AQFNSuPF7v-ciA/feedshare-shrink_2048_1536/B4DZqusxbDJMAw-/0/1763867566273?e=1770249600&v=beta&t=5h5VmVC_at1s5E0-35p7G7y-YsblnmFt36y2D_MRVtw", + "width": 1283, + "height": 1226 + } + ] + }, + "pagination_token": "dXJuOmxpOmFjdGl2aXR5OjczMjQzMTgyODc0MjM1NTM1MzYtMTc0NjI1MzU1ODk1NA==" + } +] \ No newline at end of file diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..c881140 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,26 @@ +# Core Dependencies +python-dotenv==1.0.0 +pydantic==2.5.0 +pydantic-settings==2.1.0 + +# AI & APIs +openai==1.54.0 +apify-client==1.7.0 + +# Database +supabase==2.9.1 + +# TUI +textual==0.85.0 +rich==13.7.0 + +# Utilities +tenacity==8.2.3 +loguru==0.7.2 +httpx==0.27.0 + +# Web Frontend +fastapi==0.115.0 +uvicorn==0.32.0 +jinja2==3.1.4 +python-multipart==0.0.9 diff --git a/run_web.py b/run_web.py new file mode 100644 index 0000000..6997ab7 --- /dev/null +++ b/run_web.py @@ -0,0 +1,16 @@ +#!/usr/bin/env python3 +"""Run the web frontend.""" +import uvicorn + +if __name__ == "__main__": + print("\n" + "=" * 50) + print(" LinkedIn Posts Dashboard") + print(" http://localhost:8000") + print("=" * 50 + "\n") + + uvicorn.run( + "src.web.app:app", + host="0.0.0.0", + port=8000, + reload=True + ) diff --git a/src/__init__.py b/src/__init__.py new file mode 100644 index 0000000..5237c40 --- /dev/null +++ b/src/__init__.py @@ -0,0 +1,2 @@ +"""LinkedIn Workflow System - Main package.""" +__version__ = "1.0.0" diff --git a/src/agents/__init__.py b/src/agents/__init__.py new file mode 100644 index 0000000..a1b94ee --- /dev/null +++ b/src/agents/__init__.py @@ -0,0 +1,20 @@ +"""AI Agents module.""" +from src.agents.base import BaseAgent +from src.agents.profile_analyzer import ProfileAnalyzerAgent +from src.agents.topic_extractor import TopicExtractorAgent +from src.agents.researcher import ResearchAgent +from src.agents.writer import 
WriterAgent +from src.agents.critic import CriticAgent +from src.agents.post_classifier import PostClassifierAgent +from src.agents.post_type_analyzer import PostTypeAnalyzerAgent + +__all__ = [ + "BaseAgent", + "ProfileAnalyzerAgent", + "TopicExtractorAgent", + "ResearchAgent", + "WriterAgent", + "CriticAgent", + "PostClassifierAgent", + "PostTypeAnalyzerAgent", +] diff --git a/src/agents/base.py b/src/agents/base.py new file mode 100644 index 0000000..f9a8e0c --- /dev/null +++ b/src/agents/base.py @@ -0,0 +1,120 @@ +"""Base agent class.""" +import asyncio +from abc import ABC, abstractmethod +from typing import Any, Dict, Optional +from openai import OpenAI +import httpx +from loguru import logger + +from src.config import settings + + +class BaseAgent(ABC): + """Base class for all AI agents.""" + + def __init__(self, name: str): + """ + Initialize base agent. + + Args: + name: Name of the agent + """ + self.name = name + self.openai_client = OpenAI(api_key=settings.openai_api_key) + logger.info(f"Initialized {name} agent") + + @abstractmethod + async def process(self, *args, **kwargs) -> Any: + """Process the agent's task.""" + pass + + async def call_openai( + self, + system_prompt: str, + user_prompt: str, + model: str = "gpt-4o", + temperature: float = 0.7, + response_format: Optional[Dict[str, str]] = None + ) -> str: + """ + Call OpenAI API. + + Args: + system_prompt: System message + user_prompt: User message + model: Model to use + temperature: Temperature for sampling + response_format: Optional response format (e.g., {"type": "json_object"}) + + Returns: + Assistant's response + """ + logger.info(f"[{self.name}] Calling OpenAI ({model})") + + messages = [ + {"role": "system", "content": system_prompt}, + {"role": "user", "content": user_prompt} + ] + + kwargs = { + "model": model, + "messages": messages, + "temperature": temperature + } + + if response_format: + kwargs["response_format"] = response_format + + # Run synchronous OpenAI call in thread pool to avoid blocking event loop + response = await asyncio.to_thread( + self.openai_client.chat.completions.create, + **kwargs + ) + + result = response.choices[0].message.content + logger.debug(f"[{self.name}] Received response (length: {len(result)})") + + return result + + async def call_perplexity( + self, + system_prompt: str, + user_prompt: str, + model: str = "sonar" + ) -> str: + """ + Call Perplexity API for research. 
+ + Args: + system_prompt: System message + user_prompt: User message + model: Model to use + + Returns: + Assistant's response + """ + logger.info(f"[{self.name}] Calling Perplexity ({model})") + + url = "https://api.perplexity.ai/chat/completions" + headers = { + "Authorization": f"Bearer {settings.perplexity_api_key}", + "Content-Type": "application/json" + } + + payload = { + "model": model, + "messages": [ + {"role": "system", "content": system_prompt}, + {"role": "user", "content": user_prompt} + ] + } + + async with httpx.AsyncClient() as client: + response = await client.post(url, json=payload, headers=headers, timeout=60.0) + response.raise_for_status() + result = response.json() + + content = result["choices"][0]["message"]["content"] + logger.debug(f"[{self.name}] Received Perplexity response (length: {len(content)})") + + return content diff --git a/src/agents/critic.py b/src/agents/critic.py new file mode 100644 index 0000000..cd24ef1 --- /dev/null +++ b/src/agents/critic.py @@ -0,0 +1,276 @@ +"""Critic agent for reviewing and improving LinkedIn posts.""" +import json +from typing import Dict, Any, Optional, List +from loguru import logger + +from src.agents.base import BaseAgent + + +class CriticAgent(BaseAgent): + """Agent for critically reviewing LinkedIn posts and suggesting improvements.""" + + def __init__(self): + """Initialize critic agent.""" + super().__init__("Critic") + + async def process( + self, + post: str, + profile_analysis: Dict[str, Any], + topic: Dict[str, Any], + example_posts: Optional[List[str]] = None, + iteration: int = 1, + max_iterations: int = 3 + ) -> Dict[str, Any]: + """ + Review a LinkedIn post and provide feedback. + + Args: + post: The post to review + profile_analysis: Profile analysis results + topic: Topic information + example_posts: Optional list of real posts to compare style against + iteration: Current iteration number (1-based) + max_iterations: Maximum number of iterations allowed + + Returns: + Dictionary with approval status and feedback + """ + logger.info(f"Reviewing post for quality and authenticity (iteration {iteration}/{max_iterations})") + + system_prompt = self._get_system_prompt(profile_analysis, example_posts, iteration, max_iterations) + user_prompt = self._get_user_prompt(post, topic, iteration, max_iterations) + + response = await self.call_openai( + system_prompt=system_prompt, + user_prompt=user_prompt, + model="gpt-4o-mini", + temperature=0.3, + response_format={"type": "json_object"} + ) + + # Parse response + result = json.loads(response) + + is_approved = result.get("approved", False) + logger.info(f"Post {'APPROVED' if is_approved else 'NEEDS REVISION'}") + + return result + + def _get_system_prompt(self, profile_analysis: Dict[str, Any], example_posts: Optional[List[str]] = None, iteration: int = 1, max_iterations: int = 3) -> str: + """Get system prompt for critic - orientiert an bewährten n8n-Prompts.""" + writing_style = profile_analysis.get("writing_style", {}) + linguistic = profile_analysis.get("linguistic_fingerprint", {}) + tone_analysis = profile_analysis.get("tone_analysis", {}) + phrase_library = profile_analysis.get("phrase_library", {}) + structure_templates = profile_analysis.get("structure_templates", {}) + + # Build example posts section for style comparison + examples_section = "" + if example_posts and len(example_posts) > 0: + examples_section = "\n\nECHTE POSTS DER PERSON (VERGLEICHE DEN STIL!):\n" + for i, post in enumerate(example_posts, 1): + post_text = post[:1200] + "..." 
if len(post) > 1200 else post + examples_section += f"\n--- Echtes Beispiel {i} ---\n{post_text}\n" + examples_section += "--- Ende Beispiele ---\n" + + # Safe extraction of signature phrases + sig_phrases = linguistic.get('signature_phrases', []) + sig_phrases_str = ', '.join(sig_phrases) if sig_phrases else 'Keine spezifischen' + + # Extract phrase library for style matching + hook_phrases = phrase_library.get('hook_phrases', []) + emotional_expressions = phrase_library.get('emotional_expressions', []) + cta_phrases = phrase_library.get('cta_phrases', []) + + # Extract structure info + primary_structure = structure_templates.get('primary_structure', 'Hook → Body → CTA') + + # Iteration-aware guidance + iteration_guidance = "" + if iteration == 1: + iteration_guidance = """ +ERSTE ITERATION - Fokus auf die WICHTIGSTEN Verbesserungen: +- Konzentriere dich auf maximal 2-3 kritische Punkte +- Gib SEHR SPEZIFISCHE Änderungsanweisungen +- Kleine Stilnuancen können in späteren Iterationen optimiert werden +- Erwarteter Score-Bereich: 70-85 (selten höher beim ersten Entwurf)""" + elif iteration == max_iterations: + iteration_guidance = """ +LETZTE ITERATION - Faire Endbewertung: +- Der Post wurde bereits überarbeitet - würdige die Verbesserungen! +- Prüfe: Hat der Writer die vorherigen Kritikpunkte umgesetzt? +- Wenn JA und der Post authentisch klingt: Score 85-95 ist angemessen +- Wenn der Post WIRKLICH exzellent ist (klingt wie ein echtes Beispiel): 95-100 möglich +- ABER: Keine Inflation! Nur 90+ wenn es wirklich verdient ist +- Kleine Imperfektionen sind OK bei 85-89, nicht bei 90+""" + else: + iteration_guidance = f""" +ITERATION {iteration}/{max_iterations} - Fortschritt anerkennen: +- Prüfe ob vorherige Kritikpunkte umgesetzt wurden +- Wenn Verbesserungen sichtbar: Score sollte steigen +- Fokussiere auf verbleibende Verbesserungen +- Erwarteter Score-Bereich: 75-90 (wenn erste Kritik gut umgesetzt)""" + + return f"""ROLLE: Du bist ein präziser Chefredakteur für Personal Branding. Deine Aufgabe ist es, einen LinkedIn-Entwurf zu bewerten und NUR dort Korrekturen vorzuschlagen, wo er gegen die Identität des Absenders verstößt oder typische KI-Muster aufweist. +{examples_section} +{iteration_guidance} + +REFERENZ-PROFIL (Der Maßstab): + +Branche: {profile_analysis.get('audience_insights', {}).get('industry_context', 'Business')} +Perspektive: {writing_style.get('perspective', 'Ich-Perspektive')} +Ansprache: {writing_style.get('form_of_address', 'Du/Euch')} +Energie-Level: {linguistic.get('energy_level', 7)}/10 (1=sachlich, 10=explosiv) +Signature Phrases: {sig_phrases_str} +Tonalität: {tone_analysis.get('primary_tone', 'Professionell')} +Erwartete Struktur: {primary_structure} + +PHRASEN-REFERENZ (Der Post sollte ÄHNLICHE Formulierungen nutzen - nicht identisch, aber im gleichen Stil): +- Hook-Stil Beispiele: {', '.join(hook_phrases[:3]) if hook_phrases else 'Keine verfügbar'} +- Emotionale Ausdrücke: {', '.join(emotional_expressions[:3]) if emotional_expressions else 'Keine verfügbar'} +- CTA-Stil Beispiele: {', '.join(cta_phrases[:2]) if cta_phrases else 'Keine verfügbar'} + + +CHIRURGISCHE KORREKTUR-REGELN (Prüfe diese Punkte!): + +1. SATZBAU-OPTIMIERUNG: + - Keine Gedankenstriche (–) zur Satzverbindung - diese wirken zu konstruiert + - Wenn Gedankenstriche gefunden werden: Vorschlagen, durch Kommas, Punkte oder Konjunktionen zu ersetzen + - Zwei eigenständige Sätze sind oft besser als ein verbundener + +2. 
ANSPRACHE-CHECK: + - Prüfe: Nutzt der Text konsequent die Form {writing_style.get('form_of_address', 'Du/Euch')}? + - Falls inkonsistent (z.B. Sie statt Du oder umgekehrt): Als Fehler markieren + +3. PERSPEKTIV-CHECK (Priorität 1!): + - Wenn das Profil {writing_style.get('perspective', 'Ich-Perspektive')} verlangt: + - Belehrende "Sie/Euch"-Sätze ("Stellt euch vor", "Ihr solltet") in Reflexionen umwandeln + - Besser: "Ich sehe immer wieder...", "Ich frage mich oft..." statt direkter Handlungsaufforderungen + +4. KI-MUSTER ERKENNEN: + - "In der heutigen Zeit", "Tauchen Sie ein", "Es ist kein Geheimnis" = SOFORT bemängeln + - "Stellen Sie sich vor", "Lassen Sie uns" = KI-typisch + - Zu perfekte, glatte Formulierungen ohne Ecken und Kanten + +5. ENERGIE-ABGLEICH: + - Passt die Intensität zum Energie-Level ({linguistic.get('energy_level', 7)}/10)? + - Zu lahm bei hohem Level oder zu überdreht bei niedrigem Level = Korrektur vorschlagen + +6. UNICODE & FORMATIERUNG: + - Prüfe den Hook: Ist Unicode-Fettung korrekt? (Umlaute ä, ö, ü, ß dürfen nicht zerstört sein) + - Keine Markdown-Sterne (**) - LinkedIn unterstützt das nicht + - Keine Trennlinien (---) + +7. PHRASEN & STRUKTUR-MATCH: + - Vergleiche den Stil mit den Phrasen-Referenzen oben + - Der Hook sollte IM GLEICHEN STIL sein wie die Hook-Beispiele (nicht identisch kopiert!) + - Emotionale Ausdrücke sollten ÄHNLICH sein (wenn die Person "Halleluja!" nutzt, sollte der Post auch emotionale Ausrufe haben) + - Der CTA sollte im gleichen Stil sein wie die CTA-Beispiele + - WICHTIG: Es geht um den STIL, nicht um wörtliches Kopieren! + + +BEWERTUNGSKRITERIEN (100 Punkte total): + +1. Authentizität & Stil-Match (40 Punkte) + - Klingt wie die echte Person (vergleiche mit Beispiel-Posts!) + - Keine KI-Muster erkennbar + - Richtige Energie und Tonalität + - Nutzt ÄHNLICHE Phrasen/Formulierungen wie in der Phrasen-Referenz (nicht identisch kopiert, aber im gleichen Stil!) + - Hat die Person typische emotionale Ausdrücke? Sind welche im Post? + +2. Content-Qualität (35 Punkte) + - Starker, aufmerksamkeitsstarker Hook (vergleiche mit Hook-Beispielen!) + - Klarer Mehrwert für die Zielgruppe + - Gute Struktur und Lesefluss (folgt der erwarteten Struktur: {primary_structure}) + - Passender CTA (vergleiche mit CTA-Beispielen!) + +3. Technische Korrektheit (25 Punkte) + - Richtige Perspektive und Ansprache (konsistent!) 
+ - Angemessene Länge (~{writing_style.get('average_word_count', 300)} Wörter) + - Korrekte Formatierung + + +SCORE-KALIBRIERUNG (WICHTIG - lies das genau!): + +**90-100 Punkte = Exzellent, direkt veröffentlichbar** +- 100: Herausragend - Post klingt EXAKT wie die echte Person, perfekter Hook, null KI-Muster +- 95-99: Exzellent - Kaum von echtem Post unterscheidbar, minimale Verbesserungsmöglichkeiten +- 90-94: Sehr gut - Authentisch, professionell, kleine Stilnuancen könnten besser sein + +**85-89 Punkte = Gut, veröffentlichungsreif** +- Der Post funktioniert, erfüllt alle wichtigen Kriterien +- Vielleicht 1-2 Formulierungen die noch besser sein könnten + +**75-84 Punkte = Solide Basis, aber Verbesserungen nötig** +- Grundstruktur stimmt, aber erkennbare Probleme +- Entweder KI-Muster, Stil-Mismatch oder technische Fehler + +**< 75 Punkte = Wesentliche Überarbeitung nötig** +- Mehrere gravierende Probleme +- Klingt nicht authentisch oder hat strukturelle Mängel + +APPROVAL-SCHWELLEN: +- >= 85 Punkte: APPROVED (veröffentlichungsreif) +- 75-84 Punkte: Fast fertig, kleine Anpassungen +- < 75 Punkte: Überarbeitung nötig + +WICHTIG: Gib 90+ Punkte wenn der Post es VERDIENT - nicht aus Großzügigkeit! +Ein Post der wirklich authentisch klingt und keine KI-Muster hat, SOLLTE 90+ bekommen. + + +WICHTIG FÜR DEIN FEEDBACK: +- Gib EXAKTE Formulierungsvorschläge: "Ändere 'X' zu 'Y'" (nicht "verbessere den Hook") +- Maximal 3 konkrete Änderungen pro Iteration +- Erkenne umgesetzte Verbesserungen an und erhöhe den Score entsprechend +- Bei der letzten Iteration: Sei fair - gib 90+ wenn der Post es verdient, aber nicht aus Milde + +Antworte als JSON.""" + + def _get_user_prompt(self, post: str, topic: Dict[str, Any], iteration: int = 1, max_iterations: int = 3) -> str: + """Get user prompt for critic.""" + iteration_note = "" + if iteration > 1: + iteration_note = f"\n**HINWEIS:** Dies ist Iteration {iteration} von {max_iterations}. Der Post wurde bereits überarbeitet.\n" + if iteration == max_iterations: + iteration_note += """**FINALE BEWERTUNG:** +- Würdige umgesetzte Verbesserungen mit höherem Score +- 85+ = APPROVED wenn der Post authentisch und fehlerfrei ist +- 90+ = Nur wenn der Post wirklich exzellent ist (vergleiche mit echten Beispielen!) 
+- Sei fair, nicht großzügig - Qualität bleibt der Maßstab.\n""" + + return f"""Bewerte diesen LinkedIn-Post: +{iteration_note} +**THEMA:** {topic.get('title', 'Unknown')} + +**POST:** +{post} + +--- + +Antworte im JSON-Format: + +{{ + "approved": true/false, + "overall_score": 0-100, + "scores": {{ + "authenticity_and_style": 0-40, + "content_quality": 0-35, + "technical_execution": 0-25 + }}, + "strengths": ["Stärke 1", "Stärke 2"], + "improvements": ["Verbesserung 1", "Verbesserung 2"], + "feedback": "Kurze Zusammenfassung", + "specific_changes": [ + {{ + "original": "Exakter Text aus dem Post der geändert werden soll", + "replacement": "Der neue vorgeschlagene Text", + "reason": "Warum diese Änderung" + }} + ] +}} + +WICHTIG bei specific_changes: +- Gib EXAKTE Textstellen an die geändert werden sollen +- Maximal 3 Changes pro Iteration +- Der "original" Text muss EXAKT im Post vorkommen""" diff --git a/src/agents/post_classifier.py b/src/agents/post_classifier.py new file mode 100644 index 0000000..0665936 --- /dev/null +++ b/src/agents/post_classifier.py @@ -0,0 +1,279 @@ +"""Post classifier agent for categorizing LinkedIn posts into post types.""" +import json +import re +from typing import Dict, Any, List, Optional, Tuple +from uuid import UUID +from loguru import logger + +from src.agents.base import BaseAgent +from src.database.models import LinkedInPost, PostType + + +class PostClassifierAgent(BaseAgent): + """Agent for classifying LinkedIn posts into defined post types.""" + + def __init__(self): + """Initialize post classifier agent.""" + super().__init__("PostClassifier") + + async def process( + self, + posts: List[LinkedInPost], + post_types: List[PostType] + ) -> List[Dict[str, Any]]: + """ + Classify posts into post types. + + Uses a two-phase approach: + 1. Hashtag matching (fast, deterministic) + 2. Semantic matching via LLM (for posts without hashtag match) + + Args: + posts: List of posts to classify + post_types: List of available post types + + Returns: + List of classification results with post_id, post_type_id, method, confidence + """ + if not posts or not post_types: + logger.warning("No posts or post types to classify") + return [] + + logger.info(f"Classifying {len(posts)} posts into {len(post_types)} post types") + + classifications = [] + posts_needing_semantic = [] + + # Phase 1: Hashtag matching + for post in posts: + result = self._match_by_hashtags(post, post_types) + if result: + classifications.append(result) + else: + posts_needing_semantic.append(post) + + logger.info(f"Hashtag matching: {len(classifications)} matched, {len(posts_needing_semantic)} need semantic") + + # Phase 2: Semantic matching for remaining posts + if posts_needing_semantic: + semantic_results = await self._match_semantically(posts_needing_semantic, post_types) + classifications.extend(semantic_results) + + logger.info(f"Classification complete: {len(classifications)} total classifications") + return classifications + + def _extract_hashtags(self, text: str) -> List[str]: + """Extract hashtags from post text (lowercase for matching).""" + hashtags = re.findall(r'#(\w+)', text) + return [h.lower() for h in hashtags] + + def _match_by_hashtags( + self, + post: LinkedInPost, + post_types: List[PostType] + ) -> Optional[Dict[str, Any]]: + """ + Try to match post to a post type by hashtags. 
+ + Args: + post: The post to classify + post_types: Available post types + + Returns: + Classification dict or None if no match + """ + post_hashtags = set(self._extract_hashtags(post.post_text)) + + if not post_hashtags: + return None + + best_match = None + best_match_count = 0 + + for pt in post_types: + if not pt.identifying_hashtags: + continue + + # Convert post type hashtags to lowercase for comparison + pt_hashtags = set(h.lower().lstrip('#') for h in pt.identifying_hashtags) + + # Count matching hashtags + matches = post_hashtags.intersection(pt_hashtags) + + if matches and len(matches) > best_match_count: + best_match = pt + best_match_count = len(matches) + + if best_match: + # Confidence based on how many hashtags matched + confidence = min(1.0, best_match_count * 0.25 + 0.5) + return { + "post_id": post.id, + "post_type_id": best_match.id, + "classification_method": "hashtag", + "classification_confidence": confidence + } + + return None + + async def _match_semantically( + self, + posts: List[LinkedInPost], + post_types: List[PostType] + ) -> List[Dict[str, Any]]: + """ + Match posts to post types using semantic analysis via LLM. + + Args: + posts: Posts to classify + post_types: Available post types + + Returns: + List of classification results + """ + if not posts: + return [] + + # Build post type descriptions for the LLM + type_descriptions = [] + for pt in post_types: + desc = f"- **{pt.name}** (ID: {pt.id})" + if pt.description: + desc += f": {pt.description}" + if pt.identifying_keywords: + desc += f"\n Keywords: {', '.join(pt.identifying_keywords[:10])}" + if pt.semantic_properties: + props = pt.semantic_properties + if props.get("purpose"): + desc += f"\n Purpose: {props['purpose']}" + if props.get("typical_tone"): + desc += f"\n Tone: {props['typical_tone']}" + type_descriptions.append(desc) + + type_descriptions_text = "\n".join(type_descriptions) + + # Process in batches for efficiency + batch_size = 10 + results = [] + + for i in range(0, len(posts), batch_size): + batch = posts[i:i + batch_size] + batch_results = await self._classify_batch(batch, post_types, type_descriptions_text) + results.extend(batch_results) + + return results + + async def _classify_batch( + self, + posts: List[LinkedInPost], + post_types: List[PostType], + type_descriptions: str + ) -> List[Dict[str, Any]]: + """Classify a batch of posts using LLM.""" + # Build post list for prompt + posts_list = [] + for i, post in enumerate(posts): + post_preview = post.post_text[:500] + "..." if len(post.post_text) > 500 else post.post_text + posts_list.append(f"[Post {i + 1}] (ID: {post.id})\n{post_preview}") + + posts_text = "\n\n".join(posts_list) + + # Build valid type IDs for validation + valid_type_ids = {str(pt.id) for pt in post_types} + valid_type_ids.add("null") # Allow unclassified + + system_prompt = """Du bist ein Content-Analyst, der LinkedIn-Posts in vordefinierte Kategorien einordnet. + +Analysiere jeden Post und ordne ihn dem passendsten Post-Typ zu. +Wenn kein Typ wirklich passt, gib "null" als post_type_id zurück. 
+ +Bewerte die Zuordnung mit einer Confidence zwischen 0.3 und 1.0: +- 0.9-1.0: Sehr sicher, Post passt perfekt zum Typ +- 0.7-0.9: Gute Übereinstimmung +- 0.5-0.7: Moderate Übereinstimmung +- 0.3-0.5: Schwache Übereinstimmung, aber beste verfügbare Option + +Antworte im JSON-Format.""" + + user_prompt = f"""Ordne die folgenden Posts den verfügbaren Post-Typen zu: + +=== VERFÜGBARE POST-TYPEN === +{type_descriptions} + +=== POSTS ZUM KLASSIFIZIEREN === +{posts_text} + +=== ANTWORT-FORMAT === +Gib ein JSON-Objekt zurück mit diesem Format: +{{ + "classifications": [ + {{ + "post_id": "uuid-des-posts", + "post_type_id": "uuid-des-typs oder null", + "confidence": 0.8, + "reasoning": "Kurze Begründung" + }} + ] +}}""" + + try: + response = await self.call_openai( + system_prompt=system_prompt, + user_prompt=user_prompt, + model="gpt-4o-mini", + temperature=0.2, + response_format={"type": "json_object"} + ) + + result = json.loads(response) + classifications = result.get("classifications", []) + + # Process and validate results + valid_results = [] + for c in classifications: + post_id = c.get("post_id") + post_type_id = c.get("post_type_id") + confidence = c.get("confidence", 0.5) + + # Validate post_id exists + matching_post = next((p for p in posts if str(p.id) == post_id), None) + if not matching_post: + logger.warning(f"Invalid post_id in classification: {post_id}") + continue + + # Validate post_type_id + if post_type_id and post_type_id != "null" and post_type_id not in valid_type_ids: + logger.warning(f"Invalid post_type_id in classification: {post_type_id}") + continue + + if post_type_id and post_type_id != "null": + valid_results.append({ + "post_id": matching_post.id, + "post_type_id": UUID(post_type_id), + "classification_method": "semantic", + "classification_confidence": min(1.0, max(0.3, confidence)) + }) + + return valid_results + + except Exception as e: + logger.error(f"Semantic classification failed: {e}") + return [] + + async def classify_single_post( + self, + post: LinkedInPost, + post_types: List[PostType] + ) -> Optional[Dict[str, Any]]: + """ + Classify a single post. + + Args: + post: The post to classify + post_types: Available post types + + Returns: + Classification result or None + """ + results = await self.process([post], post_types) + return results[0] if results else None diff --git a/src/agents/post_type_analyzer.py b/src/agents/post_type_analyzer.py new file mode 100644 index 0000000..611a654 --- /dev/null +++ b/src/agents/post_type_analyzer.py @@ -0,0 +1,335 @@ +"""Post type analyzer agent for creating intensive analysis per post type.""" +import json +import re +from typing import Dict, Any, List +from loguru import logger + +from src.agents.base import BaseAgent +from src.database.models import LinkedInPost, PostType + + +class PostTypeAnalyzerAgent(BaseAgent): + """Agent for analyzing post types based on their classified posts.""" + + MIN_POSTS_FOR_ANALYSIS = 3 # Minimum posts needed for meaningful analysis + + def __init__(self): + """Initialize post type analyzer agent.""" + super().__init__("PostTypeAnalyzer") + + async def process( + self, + post_type: PostType, + posts: List[LinkedInPost] + ) -> Dict[str, Any]: + """ + Analyze a post type based on its posts. 
+ + Args: + post_type: The post type to analyze + posts: Posts belonging to this type + + Returns: + Analysis dictionary with patterns and insights + """ + if len(posts) < self.MIN_POSTS_FOR_ANALYSIS: + logger.warning(f"Not enough posts for analysis: {len(posts)} < {self.MIN_POSTS_FOR_ANALYSIS}") + return { + "error": f"Mindestens {self.MIN_POSTS_FOR_ANALYSIS} Posts benötigt", + "post_count": len(posts), + "sufficient_data": False + } + + logger.info(f"Analyzing post type '{post_type.name}' with {len(posts)} posts") + + # Prepare posts for analysis + posts_text = self._prepare_posts_for_analysis(posts) + + # Get comprehensive analysis from LLM + analysis = await self._analyze_posts(post_type, posts_text, len(posts)) + + # Add metadata + analysis["post_count"] = len(posts) + analysis["sufficient_data"] = True + analysis["post_type_name"] = post_type.name + + logger.info(f"Analysis complete for '{post_type.name}'") + return analysis + + def _prepare_posts_for_analysis(self, posts: List[LinkedInPost]) -> str: + """Prepare posts text for analysis.""" + posts_sections = [] + for i, post in enumerate(posts, 1): + # Include full post text + posts_sections.append(f"=== POST {i} ===\n{post.post_text}\n=== ENDE POST {i} ===") + return "\n\n".join(posts_sections) + + async def _analyze_posts( + self, + post_type: PostType, + posts_text: str, + post_count: int + ) -> Dict[str, Any]: + """Run comprehensive analysis on posts.""" + + system_prompt = """Du bist ein erfahrener LinkedIn Content-Analyst und Ghostwriter-Coach. +Deine Aufgabe ist es, Muster und Stilelemente aus einer Sammlung von Posts zu extrahieren, +um einen "Styleguide" für diesen Post-Typ zu erstellen. + +Sei SEHR SPEZIFISCH und nutze ECHTE BEISPIELE aus den Posts! +Keine generischen Beschreibungen - immer konkrete Auszüge und Formulierungen. + +Antworte im JSON-Format.""" + + user_prompt = f"""Analysiere die folgenden {post_count} Posts vom Typ "{post_type.name}". +{f'Beschreibung: {post_type.description}' if post_type.description else ''} + +=== DIE POSTS === +{posts_text} + +=== DEINE ANALYSE === + +Erstelle eine detaillierte Analyse im folgenden JSON-Format: + +{{ + "structure_patterns": {{ + "typical_structure": "Beschreibe die typische Struktur (z.B. Hook → Problem → Lösung → CTA)", + "paragraph_count": "Typische Anzahl Absätze", + "paragraph_length": "Typische Absatzlänge in Worten", + "uses_lists": true/false, + "list_style": "Wenn Listen: Wie werden sie formatiert? (Bullets, Nummern, Emojis)", + "structure_template": "Eine Vorlage für die Struktur" + }}, + + "language_style": {{ + "tone": "Haupttonalität (z.B. inspirierend, sachlich, provokativ)", + "secondary_tones": ["Weitere Tonalitäten"], + "perspective": "Ich-Perspektive, Du-Ansprache, Wir-Form?", + "energy_level": 1-10, + "formality": "formell/informell/mix", + "sentence_types": "Kurz und knackig vs. ausführlich vs. mix", + "typical_sentence_starters": ["Echte Beispiele wie Sätze beginnen"], + "signature_phrases": ["Wiederkehrende Formulierungen"] + }}, + + "hooks": {{ + "hook_types": ["Welche Hook-Arten werden verwendet (Frage, Statement, Statistik, Story...)"], + "real_examples": [ + {{ + "hook": "Der genaue Hook-Text", + "type": "Art des Hooks", + "why_effective": "Warum funktioniert er?" + }} + ], + "hook_patterns": ["Muster die sich wiederholen"], + "average_hook_length": "Wie lang sind Hooks typischerweise?" 
+ }}, + + "ctas": {{ + "cta_types": ["Welche CTA-Arten (Frage, Aufforderung, Teilen-Bitte...)"], + "real_examples": [ + {{ + "cta": "Der genaue CTA-Text", + "type": "Art des CTAs" + }} + ], + "cta_position": "Wo steht der CTA typischerweise?", + "cta_intensity": "Wie direkt/stark ist der CTA?" + }}, + + "visual_patterns": {{ + "emoji_usage": {{ + "frequency": "hoch/mittel/niedrig/keine", + "typical_emojis": ["Die häufigsten Emojis"], + "placement": "Wo werden Emojis platziert?", + "purpose": "Wofür werden sie genutzt?" + }}, + "line_breaks": "Wie werden Absätze/Zeilenumbrüche genutzt?", + "formatting": "Unicode-Fett, Großbuchstaben, Sonderzeichen?", + "whitespace": "Viel/wenig Whitespace?" + }}, + + "length_patterns": {{ + "average_words": "Durchschnittliche Wortanzahl", + "range": "Von-bis Wortanzahl", + "ideal_length": "Empfohlene Länge für diesen Typ" + }}, + + "recurring_elements": {{ + "phrases": ["Wiederkehrende Phrasen und Formulierungen"], + "transitions": ["Typische Übergänge zwischen Absätzen"], + "closings": ["Typische Schlussformulierungen vor dem CTA"] + }}, + + "content_focus": {{ + "main_themes": ["Hauptthemen dieses Post-Typs"], + "value_proposition": "Welchen Mehrwert bieten diese Posts?", + "target_emotion": "Welche Emotion soll beim Leser ausgelöst werden?" + }}, + + "writing_guidelines": {{ + "dos": ["5-7 konkrete Empfehlungen was man TUN sollte"], + "donts": ["3-5 konkrete Dinge die man VERMEIDEN sollte"], + "key_success_factors": ["Was macht Posts dieses Typs erfolgreich?"] + }} +}} + +WICHTIG: +- Nutze ECHTE Textauszüge aus den Posts als Beispiele! +- Sei spezifisch, nicht generisch +- Wenn ein Muster nur in 1-2 Posts vorkommt, erwähne es trotzdem aber markiere es als "vereinzelt" +- Alle Beispiele müssen aus den gegebenen Posts stammen""" + + try: + response = await self.call_openai( + system_prompt=system_prompt, + user_prompt=user_prompt, + model="gpt-4o", + temperature=0.3, + response_format={"type": "json_object"} + ) + + analysis = json.loads(response) + return analysis + + except Exception as e: + logger.error(f"Analysis failed: {e}") + return { + "error": str(e), + "sufficient_data": True, + "post_count": post_count + } + + async def analyze_multiple_types( + self, + post_types_with_posts: List[Dict[str, Any]] + ) -> Dict[str, Dict[str, Any]]: + """ + Analyze multiple post types. + + Args: + post_types_with_posts: List of dicts with 'post_type' and 'posts' keys + + Returns: + Dictionary mapping post_type_id to analysis + """ + results = {} + + for item in post_types_with_posts: + post_type = item["post_type"] + posts = item["posts"] + + try: + analysis = await self.process(post_type, posts) + results[str(post_type.id)] = analysis + except Exception as e: + logger.error(f"Failed to analyze post type {post_type.name}: {e}") + results[str(post_type.id)] = { + "error": str(e), + "sufficient_data": False + } + + return results + + def get_writing_prompt_section(self, analysis: Dict[str, Any]) -> str: + """ + Generate a prompt section for the writer based on the analysis. 
+ + Args: + analysis: The post type analysis + + Returns: + Formatted string for inclusion in writer prompts + """ + if not analysis.get("sufficient_data"): + return "" + + sections = [] + + # Structure + if structure := analysis.get("structure_patterns"): + sections.append(f""" +STRUKTUR FÜR DIESEN POST-TYP: +- Typische Struktur: {structure.get('typical_structure', 'Standard')} +- Absätze: {structure.get('paragraph_count', '3-5')} Absätze +- Listen: {'Ja' if structure.get('uses_lists') else 'Nein'} +{f"- Listen-Stil: {structure.get('list_style')}" if structure.get('uses_lists') else ''} +""") + + # Language style + if style := analysis.get("language_style"): + sections.append(f""" +SPRACH-STIL: +- Tonalität: {style.get('tone', 'Professionell')} +- Perspektive: {style.get('perspective', 'Ich')} +- Energie-Level: {style.get('energy_level', 7)}/10 +- Formalität: {style.get('formality', 'informell')} + +Typische Satzanfänge: +{chr(10).join([f' - "{s}"' for s in style.get('typical_sentence_starters', [])[:5]])} + +Signature Phrases: +{chr(10).join([f' - "{p}"' for p in style.get('signature_phrases', [])[:5]])} +""") + + # Hooks + if hooks := analysis.get("hooks"): + hook_examples = hooks.get("real_examples", [])[:3] + hook_text = "\n".join([f' - "{h.get("hook", "")}" ({h.get("type", "")})' for h in hook_examples]) + sections.append(f""" +HOOK-MUSTER: +Hook-Typen: {', '.join(hooks.get('hook_types', []))} + +Echte Beispiele: +{hook_text} + +Muster: {', '.join(hooks.get('hook_patterns', [])[:3])} +""") + + # CTAs + if ctas := analysis.get("ctas"): + cta_examples = ctas.get("real_examples", [])[:3] + cta_text = "\n".join([f' - "{c.get("cta", "")}"' for c in cta_examples]) + sections.append(f""" +CTA-MUSTER: +CTA-Typen: {', '.join(ctas.get('cta_types', []))} + +Echte Beispiele: +{cta_text} + +Position: {ctas.get('cta_position', 'Am Ende')} +""") + + # Visual patterns + if visual := analysis.get("visual_patterns"): + emoji = visual.get("emoji_usage", {}) + sections.append(f""" +VISUELLE ELEMENTE: +- Emoji-Nutzung: {emoji.get('frequency', 'mittel')} +- Typische Emojis: {' '.join(emoji.get('typical_emojis', [])[:8])} +- Platzierung: {emoji.get('placement', 'Variabel')} +- Formatierung: {visual.get('formatting', 'Standard')} +""") + + # Length + if length := analysis.get("length_patterns"): + sections.append(f""" +LÄNGE: +- Ideal: ca. 
{length.get('ideal_length', '200-300')} Wörter +- Range: {length.get('range', '150-400')} Wörter +""") + + # Guidelines + if guidelines := analysis.get("writing_guidelines"): + dos = guidelines.get("dos", [])[:5] + donts = guidelines.get("donts", [])[:3] + sections.append(f""" +WICHTIGE REGELN: +DO: +{chr(10).join([f' ✓ {d}' for d in dos])} + +DON'T: +{chr(10).join([f' ✗ {d}' for d in donts])} +""") + + return "\n".join(sections) diff --git a/src/agents/profile_analyzer.py b/src/agents/profile_analyzer.py new file mode 100644 index 0000000..a562b2f --- /dev/null +++ b/src/agents/profile_analyzer.py @@ -0,0 +1,300 @@ +"""Profile analyzer agent.""" +import json +from typing import Dict, Any, List +from loguru import logger + +from src.agents.base import BaseAgent +from src.database.models import LinkedInProfile, LinkedInPost + + +class ProfileAnalyzerAgent(BaseAgent): + """Agent for analyzing LinkedIn profiles and extracting writing patterns.""" + + def __init__(self): + """Initialize profile analyzer agent.""" + super().__init__("ProfileAnalyzer") + + async def process( + self, + profile: LinkedInProfile, + posts: List[LinkedInPost], + customer_data: Dict[str, Any] + ) -> Dict[str, Any]: + """ + Analyze LinkedIn profile and extract writing patterns. + + Args: + profile: LinkedIn profile data + posts: List of LinkedIn posts + customer_data: Additional customer data from input file + + Returns: + Comprehensive profile analysis + """ + logger.info(f"Analyzing profile for: {profile.name}") + + # Prepare analysis data + profile_summary = { + "name": profile.name, + "headline": profile.headline, + "summary": profile.summary, + "industry": profile.industry, + "location": profile.location + } + + # Prepare posts with engagement data - use the 15 most recent posts + posts_with_engagement = self._prepare_posts_for_analysis(posts[:15]) + + # Also identify top performing posts by engagement + top_posts = self._get_top_performing_posts(posts, limit=5) + + system_prompt = self._get_system_prompt() + user_prompt = self._get_user_prompt(profile_summary, posts_with_engagement, top_posts, customer_data) + + response = await self.call_openai( + system_prompt=system_prompt, + user_prompt=user_prompt, + model="gpt-4o", + temperature=0.3, + response_format={"type": "json_object"} + ) + + # Parse JSON response + analysis = json.loads(response) + logger.info("Profile analysis completed successfully") + + return analysis + + def _prepare_posts_for_analysis(self, posts: List[LinkedInPost]) -> List[Dict[str, Any]]: + """Prepare posts with engagement data for analysis.""" + prepared = [] + for i, post in enumerate(posts): + if not post.post_text: + continue + prepared.append({ + "index": i + 1, + "text": post.post_text, + "likes": post.likes or 0, + "comments": post.comments or 0, + "shares": post.shares or 0, + "engagement_total": (post.likes or 0) + (post.comments or 0) * 2 + (post.shares or 0) * 3 + }) + return prepared + + def _get_top_performing_posts(self, posts: List[LinkedInPost], limit: int = 5) -> List[Dict[str, Any]]: + """Get top performing posts by engagement.""" + posts_with_engagement = [] + for post in posts: + if not post.post_text or len(post.post_text) < 50: + continue + engagement = (post.likes or 0) + (post.comments or 0) * 2 + (post.shares or 0) * 3 + posts_with_engagement.append({ + "text": post.post_text, + "likes": post.likes or 0, + "comments": post.comments or 0, + "shares": post.shares or 0, + "engagement_score": engagement + }) + + # Sort by engagement and return top posts + sorted_posts =
sorted(posts_with_engagement, key=lambda x: x["engagement_score"], reverse=True) + return sorted_posts[:limit] + + def _get_system_prompt(self) -> str: + """Get system prompt for profile analysis.""" + return """Du bist ein hochspezialisierter AI-Analyst für LinkedIn-Profile und Content-Strategie. + +Deine Aufgabe ist es, aus LinkedIn-Profildaten und Posts ein umfassendes Content-Analyse-Profil zu erstellen, das als BLAUPAUSE für das Schreiben neuer Posts dient. + +WICHTIG: Extrahiere ECHTE BEISPIELE aus den Posts! Keine generischen Beschreibungen. + +Das Profil soll folgende Dimensionen analysieren: + +1. **Schreibstil & Tonalität** + - Wie schreibt die Person? (formal, locker, inspirierend, provokativ, etc.) + - Welche Perspektive wird genutzt? (Ich, Wir, Man) + - Wie ist die Ansprache? (Du, Sie, neutral) + - Satzdynamik und Rhythmus + +2. **Phrasen-Bibliothek (KRITISCH!)** + - Hook-Phrasen: Wie beginnen Posts? Extrahiere 5-10 ECHTE Beispiele! + - Übergangs-Phrasen: Wie werden Absätze verbunden? + - Emotionale Ausdrücke: Ausrufe, Begeisterung, etc. + - CTA-Phrasen: Wie werden Leser aktiviert? + - Signature Phrases: Wiederkehrende Markenzeichen + +3. **Struktur-Templates** + - Analysiere die STRUKTUR der Top-Posts + - Erstelle 2-3 konkrete Templates (z.B. "Hook → Flashback → Erkenntnis → CTA") + - Typische Satzanfänge für jeden Abschnitt + +4. **Visuelle Muster** + - Emoji-Nutzung (welche, wo, wie oft) + - Unicode-Formatierung (fett, kursiv) + - Strukturierung (Absätze, Listen, etc.) + +5. **Audience Insights** + - Wer ist die Zielgruppe? + - Welche Probleme werden adressiert? + - Welcher Mehrwert wird geboten? + +Gib deine Analyse als strukturiertes JSON zurück.""" + + def _get_user_prompt( + self, + profile_summary: Dict[str, Any], + posts_with_engagement: List[Dict[str, Any]], + top_posts: List[Dict[str, Any]], + customer_data: Dict[str, Any] + ) -> str: + """Get user prompt with data for analysis.""" + # Format all posts with engagement data + all_posts_text = "" + for post in posts_with_engagement: + all_posts_text += f"\n--- Post {post['index']} (Likes: {post['likes']}, Comments: {post['comments']}, Shares: {post['shares']}) ---\n" + all_posts_text += post['text'][:2000] # Limit each post to 2000 chars + all_posts_text += "\n" + + # Format top performing posts + top_posts_text = "" + if top_posts: + for i, post in enumerate(top_posts, 1): + top_posts_text += f"\n--- TOP POST {i} (Engagement Score: {post['engagement_score']}, Likes: {post['likes']}, Comments: {post['comments']}) ---\n" + top_posts_text += post['text'][:2000] + top_posts_text += "\n" + + return f"""Bitte analysiere folgendes LinkedIn-Profil BASIEREND AUF DEN ECHTEN POSTS: + +**PROFIL-INFORMATIONEN:** +- Name: {profile_summary.get('name', 'N/A')} +- Headline: {profile_summary.get('headline', 'N/A')} +- Branche: {profile_summary.get('industry', 'N/A')} +- Location: {profile_summary.get('location', 'N/A')} +- Summary: {profile_summary.get('summary', 'N/A')} + +**ZUSÄTZLICHE KUNDENDATEN (Persona, Style Guide, etc.):** +{json.dumps(customer_data, indent=2, ensure_ascii=False)} + +**TOP-PERFORMING POSTS (die erfolgreichsten Posts - ANALYSIERE DIESE BESONDERS GENAU!):** +{top_posts_text if top_posts_text else "Keine Engagement-Daten verfügbar"} + +**ALLE POSTS ({len(posts_with_engagement)} Posts mit Engagement-Daten):** +{all_posts_text} + +--- + +WICHTIG: Analysiere die ECHTEN POSTS sehr genau! Deine Analyse muss auf den tatsächlichen Mustern basieren, nicht auf Annahmen. Extrahiere WÖRTLICHE ZITATE wo möglich! 
+ +Achte besonders auf: +1. Die TOP-PERFORMING Posts - was macht sie erfolgreich? +2. Wiederkehrende Phrasen und Formulierungen - WÖRTLICH extrahieren! +3. Wie beginnen die Posts (Hooks)? - ECHTE BEISPIELE sammeln! +4. Wie enden die Posts (CTAs)? +5. Emoji-Verwendung (welche, wo, wie oft) +6. Länge und Struktur der Absätze +7. Typische Satzanfänge und Übergänge + +Erstelle eine umfassende Analyse im folgenden JSON-Format: + +{{ + "writing_style": {{ + "tone": "Beschreibung der Tonalität basierend auf den echten Posts", + "perspective": "Ich/Wir/Man/Gemischt - mit Beispielen aus den Posts", + "form_of_address": "Du/Sie/Neutral - wie spricht die Person die Leser an?", + "sentence_dynamics": "Kurze Sätze? Lange Sätze? Mischung? Fragen?", + "average_post_length": "Kurz/Mittel/Lang", + "average_word_count": 0 + }}, + "linguistic_fingerprint": {{ + "energy_level": 0, + "shouting_usage": "Beschreibung mit konkreten Beispielen aus den Posts", + "punctuation_patterns": "Beschreibung (!!!, ..., ?, etc.)", + "signature_phrases": ["ECHTE Phrasen aus den Posts", "die wiederholt vorkommen"], + "narrative_anchors": ["Storytelling-Elemente", "die die Person nutzt"] + }}, + "phrase_library": {{ + "hook_phrases": [ + "ECHTE Hook-Sätze aus den Posts wörtlich kopiert", + "Mindestens 5-8 verschiedene Beispiele", + "z.B. '𝗞𝗜-𝗦𝘂𝗰𝗵𝗲 𝗶𝘀𝘁 𝗱𝗲𝗿 𝗲𝗿𝘀𝘁𝗲 𝗦𝗰𝗵𝗿𝗶𝘁𝘁 𝗶𝗺 𝗦𝗮𝗹𝗲𝘀 𝗙𝘂𝗻𝗻𝗲𝗹.'" + ], + "transition_phrases": [ + "ECHTE Übergangssätze zwischen Absätzen", + "z.B. 'Und wisst ihr was?', 'Aber Moment...', 'Was das mit X zu tun hat?'" + ], + "emotional_expressions": [ + "Ausrufe und emotionale Marker", + "z.B. 'Halleluja!', 'Sorry to say!!', 'Galopp!!!!'" + ], + "cta_phrases": [ + "ECHTE Call-to-Action Formulierungen", + "z.B. 'Was denkt ihr?', 'Seid ihr dabei?', 'Lasst uns darüber sprechen.'" + ], + "filler_expressions": [ + "Typische Füllwörter und Ausdrücke", + "z.B. 'Ich meine...', 'Wisst ihr...', 'Ok, ok...'" + ] + }}, + "structure_templates": {{ + "primary_structure": "Die häufigste Struktur beschreiben, z.B. 'Unicode-Hook → Persönliche Anekdote → Erkenntnis → Bullet Points → CTA'", + "template_examples": [ + {{ + "name": "Storytelling-Post", + "structure": ["Fetter Hook mit Zitat", "Flashback/Anekdote", "Erkenntnis/Lesson", "Praktische Tipps", "CTA-Frage"], + "example_post_index": 1 + }}, + {{ + "name": "Insight-Post", + "structure": ["Provokante These", "Begründung", "Beispiel", "Handlungsaufforderung"], + "example_post_index": 2 + }} + ], + "typical_sentence_starters": [ + "ECHTE Satzanfänge aus den Posts", + "z.B. 'Ich glaube, dass...', 'Was mir aufgefallen ist...', 'Das Verrückte ist...'" + ], + "paragraph_transitions": [ + "Wie werden Absätze eingeleitet?", + "z.B. 
'Und...', 'Aber:', 'Das bedeutet:'" + ] + }}, + "tone_analysis": {{ + "primary_tone": "Haupttonalität basierend auf den Posts", + "emotional_range": "Welche Emotionen werden angesprochen?", + "authenticity_markers": ["Was macht den Stil einzigartig?", "Erkennbare Merkmale"] + }}, + "topic_patterns": {{ + "main_topics": ["Hauptthemen aus den Posts"], + "content_pillars": ["Content-Säulen"], + "expertise_areas": ["Expertise-Bereiche"], + "expertise_level": "Anfänger/Fortgeschritten/Experte" + }}, + "audience_insights": {{ + "target_audience": "Wer wird angesprochen?", + "pain_points_addressed": ["Probleme die adressiert werden"], + "value_proposition": "Welchen Mehrwert bietet die Person?", + "industry_context": "Branchenkontext" + }}, + "visual_patterns": {{ + "emoji_usage": {{ + "emojis": ["Liste der tatsächlich verwendeten Emojis"], + "placement": "Anfang/Ende/Inline/Zwischen Absätzen", + "frequency": "Selten/Mittel/Häufig - pro Post durchschnittlich X" + }}, + "unicode_formatting": "Wird ✓, →, •, 𝗙𝗲𝘁𝘁 etc. verwendet? Wo?", + "structure_preferences": "Absätze/Listen/Einzeiler/Nummeriert" + }}, + "content_strategy": {{ + "hook_patterns": "Wie werden Posts KONKRET eröffnet? Beschreibung des Musters", + "cta_style": "Wie sehen die CTAs aus? Frage? Aufforderung? Keine?", + "storytelling_approach": "Persönliche Geschichten? Metaphern? Case Studies?", + "post_structure": "Hook → Body → CTA? Oder anders?" + }}, + "best_performing_patterns": {{ + "what_works": "Was machen die Top-Posts anders/besser?", + "successful_hooks": ["WÖRTLICHE Beispiel-Hooks aus Top-Posts"], + "engagement_drivers": ["Was treibt Engagement?"] + }} +}} + +KRITISCH: Bei phrase_library und structure_templates müssen ECHTE, WÖRTLICHE Beispiele aus den Posts stehen! Keine generischen Beschreibungen!""" diff --git a/src/agents/researcher.py b/src/agents/researcher.py new file mode 100644 index 0000000..746c4e6 --- /dev/null +++ b/src/agents/researcher.py @@ -0,0 +1,630 @@ +"""Research agent using Perplexity.""" +import json +import random +from datetime import datetime, timedelta +from typing import Dict, Any, List +from loguru import logger + +from src.agents.base import BaseAgent + + +class ResearchAgent(BaseAgent): + """Agent for researching new content topics using Perplexity.""" + + def __init__(self): + """Initialize research agent.""" + super().__init__("Researcher") + + async def process( + self, + profile_analysis: Dict[str, Any], + existing_topics: List[str], + customer_data: Dict[str, Any], + example_posts: List[str] = None, + post_type: Any = None, + post_type_analysis: Dict[str, Any] = None + ) -> Dict[str, Any]: + """ + Research new content topics. + + Args: + profile_analysis: Profile analysis results + existing_topics: List of already covered topics + customer_data: Customer data (contains persona, style_guide, etc.) 
+ example_posts: List of the person's actual posts for style reference + post_type: Optional PostType object for targeted research + post_type_analysis: Optional post type analysis for context + + Returns: + Research results with suggested topics + """ + logger.info("Starting research for new content topics") + if post_type: + logger.info(f"Targeting research for post type: {post_type.name}") + + # Extract key information from profile analysis + audience_insights = profile_analysis.get("audience_insights", {}) + topic_patterns = profile_analysis.get("topic_patterns", {}) + + industry = audience_insights.get("industry_context", "Business") + target_audience = audience_insights.get("target_audience", "Professionals") + content_pillars = topic_patterns.get("content_pillars", []) + pain_points = audience_insights.get("pain_points_addressed", []) + value_proposition = audience_insights.get("value_proposition", "") + + # Extract customer-specific data + persona = customer_data.get("persona", "") if customer_data else "" + + # STEP 1: Use Perplexity for REAL internet research (has live data!) + logger.info("Step 1: Researching with Perplexity (live internet data)") + perplexity_prompt = self._get_perplexity_prompt( + industry=industry, + target_audience=target_audience, + content_pillars=content_pillars, + existing_topics=existing_topics, + pain_points=pain_points, + persona=persona + ) + + # Dynamic system prompt for variety + system_prompts = [ + "Du bist ein investigativer Journalist. Finde die neuesten, spannendsten Entwicklungen mit harten Fakten.", + "Du bist ein Branchen-Analyst. Identifiziere aktuelle Trends und Marktbewegungen mit konkreten Daten.", + "Du bist ein Trend-Scout. Spüre auf, was diese Woche wirklich neu und relevant ist.", + "Du bist ein Research-Spezialist. Finde aktuelle Studien, Statistiken und News mit Quellenangaben." + ] + + raw_research = await self.call_perplexity( + system_prompt=random.choice(system_prompts), + user_prompt=perplexity_prompt, + model="sonar-pro" + ) + + logger.info("Step 2: Transforming research into personalized topic ideas") + # STEP 2: Transform raw research into PERSONALIZED topic suggestions + transform_prompt = self._get_transform_prompt( + raw_research=raw_research, + target_audience=target_audience, + persona=persona, + content_pillars=content_pillars, + example_posts=example_posts or [], + existing_topics=existing_topics, + post_type=post_type, + post_type_analysis=post_type_analysis + ) + + response = await self.call_openai( + system_prompt=self._get_topic_creator_system_prompt(), + user_prompt=transform_prompt, + model="gpt-4o", + temperature=0.7, # Higher for creative topic angles + response_format={"type": "json_object"} + ) + + # Parse JSON response + result = json.loads(response) + suggested_topics = result.get("topics", []) + + # STEP 3: Ensure diversity - filter out similar topics + suggested_topics = self._ensure_diversity(suggested_topics) + + # Parse research results + research_results = { + "raw_response": response, + "suggested_topics": suggested_topics, + "industry": industry, + "target_audience": target_audience + } + + logger.info(f"Research completed with {len(research_results['suggested_topics'])} topic suggestions") + return research_results + + def _get_topic_creator_system_prompt(self) -> str: + """Get system prompt for transforming research into personalized topics.""" + return """Du bist ein LinkedIn Content-Stratege, der aus Recherche-Ergebnissen KONKRETE, PERSONALISIERTE Themenvorschläge erstellt. 
+ +WICHTIG: Du erstellst KEINE Schlagzeilen oder News-Titel! +Du erstellst KONKRETE CONTENT-IDEEN mit: +- Einem klaren ANGLE (Perspektive/Blickwinkel) +- Einer konkreten HOOK-IDEE +- Einem NARRATIV das die Person erzählen könnte + +Der Unterschied: +❌ SCHLECHT (Schlagzeile): "KI verändert den Arbeitsmarkt" +✅ GUT (Themenvorschlag): "Warum ich als [Rolle] plötzlich 50% meiner Zeit mit KI-Prompts verbringe - und was das für mein Team bedeutet" + +❌ SCHLECHT: "Neue Studie zu Remote Work" +✅ GUT: "3 Erkenntnisse aus der Stanford Remote-Studie, die mich als Führungskraft überrascht haben" + +❌ SCHLECHT: "Fachkräftemangel in der IT" +✅ GUT: "Unpopuläre Meinung: Wir haben keinen Fachkräftemangel - wir haben ein Ausbildungsproblem. Hier ist was ich damit meine..." + +Deine Themenvorschläge müssen: +1. ZUR PERSON PASSEN - Klingt wie etwas das diese spezifische Person posten würde +2. EINEN KONKRETEN ANGLE HABEN - Nicht "über X schreiben" sondern "diesen spezifischen Aspekt von X aus dieser Perspektive beleuchten" +3. EINEN HOOK VORSCHLAGEN - Eine konkrete Idee wie der Post starten könnte +4. HINTERGRUND-INFOS LIEFERN - Fakten/Daten aus der Recherche die die Person nutzen kann +5. ABWECHSLUNGSREICH SEIN - Verschiedene Formate und Kategorien + +Antworte als JSON.""" + + def _get_system_prompt(self) -> str: + """Get system prompt for research (legacy, kept for compatibility).""" + return """Du bist ein hochspezialisierter Trend-Analyst und Content-Researcher. + +Deine Mission ist es, aktuelle, hochrelevante Content-Themen für LinkedIn zu identifizieren. + +Du sollst: +1. Aktuelle Trends, News und Diskussionen der letzten 7-14 Tage recherchieren +2. Themen finden, die für die spezifische Zielgruppe relevant sind +3. Verschiedene Kategorien abdecken: + - Aktuelle News & Studien + - Schmerzpunkt-Lösungen + - Konträre Trends (gegen Mainstream-Meinung) + - Emerging Topics + +Für jedes Thema sollst du bereitstellen: +- Einen prägnanten Titel +- Den Kern-Fakt (mit Daten, Quellen, Beispielen) +- Warum es relevant ist für die Zielgruppe +- Die Kategorie + +Fokussiere dich auf Themen, die: +- AKTUELL sind (letzte 1-2 Wochen) +- KONKRET sind (mit Daten/Fakten belegt) +- RELEVANT sind für die Zielgruppe +- UNIQUE sind (nicht bereits behandelt) + +Gib deine Antwort als JSON zurück.""" + + def _get_user_prompt( + self, + industry: str, + target_audience: str, + content_pillars: List[str], + existing_topics: List[str], + pain_points: List[str] = None, + value_proposition: str = "", + persona: str = "" + ) -> str: + """Get user prompt for research.""" + pillars_text = ", ".join(content_pillars) if content_pillars else "Verschiedene Business-Themen" + existing_text = ", ".join(existing_topics[:20]) if existing_topics else "Keine" + pain_points_text = ", ".join(pain_points) if pain_points else "Nicht spezifiziert" + + # Build persona section if available + persona_section = "" + if persona: + persona_section = f""" +**PERSONA DER PERSON (WICHTIG - Themen müssen zu dieser Expertise passen!):** +{persona[:800]} +""" + + return f"""Recherchiere aktuelle LinkedIn-Content-Themen für folgendes Profil: + +**KONTEXT:** +- Branche: {industry} +- Zielgruppe: {target_audience} +- Content-Säulen: {pillars_text} +- Pain Points der Zielgruppe: {pain_points_text} +- Value Proposition: {value_proposition or 'Mehrwert für die Zielgruppe bieten'} +{persona_section} +**BEREITS BEHANDELTE THEMEN (diese NICHT vorschlagen):** +{existing_text} + +**AUFGABE:** +Finde 5-7 verschiedene aktuelle Themen, die: +1. 
ZUR EXPERTISE/PERSONA der Person passen +2. Die PAIN POINTS der Zielgruppe adressieren +3. AUTHENTISCH von dieser Person kommen könnten +4. NICHT generisch oder beliebig sind + +Kategorien: +1. **News-Flash**: Aktuelle Nachrichten, Studien oder Entwicklungen +2. **Schmerzpunkt-Löser**: Probleme/Diskussionen, die die Zielgruppe aktuell beschäftigen +3. **Konträrer Trend**: Entwicklungen, die gegen die herkömmliche Meinung verstoßen +4. **Emerging Topic**: Neue Trends, die gerade an Fahrt gewinnen + +WICHTIG: Themen müssen zur Person passen! Ein Experte für {industry} würde keine generischen "Productivity-Tips" posten, sondern spezifische Insights aus seinem Fachgebiet. + +Fokus auf Themen, die für die deutsche/DACH-Region relevant sind. + +Gib deine Antwort im folgenden JSON-Format zurück: + +{{ + "topics": [ + {{ + "title": "Prägnanter Arbeitstitel (spezifisch, nicht generisch!)", + "category": "News-Flash / Schmerzpunkt-Löser / Konträrer Trend / Emerging Topic", + "fact": "Detaillierte Zusammenfassung mit Daten, Fakten, Beispielen - SPEZIFISCH für diese Branche", + "relevance": "Warum ist das für {target_audience} wichtig und warum sollte DIESE Person darüber schreiben?", + "source": "Quellenangaben (Studien, Artikel, Statistiken)" + }} + ] +}}""" + + def _get_perplexity_prompt( + self, + industry: str, + target_audience: str, + content_pillars: List[str], + existing_topics: List[str], + pain_points: List[str] = None, + persona: str = "" + ) -> str: + """Get prompt for Perplexity research (optimized for live internet search).""" + pillars_text = ", ".join(content_pillars) if content_pillars else "Business-Themen" + existing_text = ", ".join(existing_topics[:20]) if existing_topics else "Keine bisherigen Themen" + pain_points_text = ", ".join(pain_points) if pain_points else "Allgemeine Business-Probleme" + + # Current date for time-specific searches + today = datetime.now() + date_str = today.strftime("%d. %B %Y") + week_ago = (today - timedelta(days=7)).strftime("%d.
%B %Y") + + persona_hint = "" + if persona: + persona_hint = f"\nEXPERTISE DER PERSON: {persona[:600]}\n" + + # Randomize the research focus for variety + research_angles = [ + { + "name": "Breaking News & Studien", + "focus": "Suche nach brandneuen Studien, Reports, Umfragen oder Nachrichten", + "examples": "Neue Statistiken, Forschungsergebnisse, Unternehmens-Announcements" + }, + { + "name": "Kontroverse & Debatten", + "focus": "Suche nach aktuellen Kontroversen, Meinungsverschiedenheiten, heißen Diskussionen", + "examples": "Polarisierende Meinungen, Kritik an Trends, unerwartete Entwicklungen" + }, + { + "name": "Technologie & Innovation", + "focus": "Suche nach neuen Tools, Technologien, Methoden die gerade aufkommen", + "examples": "Neue Software, AI-Entwicklungen, Prozess-Innovationen" + }, + { + "name": "Markt & Wirtschaft", + "focus": "Suche nach wirtschaftlichen Entwicklungen, Marktveränderungen, Branchen-Shifts", + "examples": "Fusionen, Insolvenzen, Markteintritt, Regulierungen" + }, + { + "name": "Menschen & Karriere", + "focus": "Suche nach Personalien, Karriere-Trends, Arbeitsmarkt-Entwicklungen", + "examples": "Führungswechsel, Hiring-Trends, Remote Work Updates, Skill-Demands" + }, + { + "name": "Fails & Learnings", + "focus": "Suche nach öffentlichen Fehlern, Shitstorms, Lessons Learned", + "examples": "PR-Desaster, gescheiterte Launches, öffentliche Kritik" + } + ] + + # Pick 3-4 random angles for this research session + selected_angles = random.sample(research_angles, min(4, len(research_angles))) + angles_text = "\n".join([ + f"- **{angle['name']}**: {angle['focus']} (z.B. {angle['examples']})" + for angle in selected_angles + ]) + + # Random seed words for more variety + seed_variations = [ + f"Was ist DIESE WOCHE ({week_ago} bis {date_str}) passiert in {industry}?", + f"Welche BREAKING NEWS gibt es HEUTE ({date_str}) oder diese Woche in {industry}?", + f"Was diskutiert die {industry}-Branche AKTUELL ({date_str})?", + f"Welche NEUEN Entwicklungen gibt es seit {week_ago} in {industry}?" + ] + seed_question = random.choice(seed_variations) + + return f"""AKTUELLES DATUM: {date_str} + +{seed_question} +{persona_hint} +KONTEXT: +- Branche: {industry} +- Zielgruppe: {target_audience} +- Themen-Fokus: {pillars_text} +- Pain Points: {pain_points_text} + +RECHERCHE-SCHWERPUNKTE FÜR DIESE SESSION: +{angles_text} + +⛔ BEREITS BEHANDELTE THEMEN - NICHT NOCHMAL VORSCHLAGEN: +{existing_text} + +=== DEINE AUFGABE === + +Recherchiere FAKTEN, DATEN und ENTWICKLUNGEN - keine fertigen Themenvorschläge! +Ich brauche ROHDATEN die ich dann in personalisierte Content-Ideen umwandeln kann. + +Für jede Entwicklung/News sammle: +1. **Was genau ist passiert?** - Konkrete Fakten, nicht Interpretationen +2. **Zahlen & Daten** - Statistiken, Prozentsätze, Beträge, Veränderungen +3. **Wer ist beteiligt?** - Unternehmen, Personen, Organisationen +4. **Wann?** - Genaues Datum oder Zeitraum +5. **Quelle** - URL oder Publikationsname +6. **Kontext** - Warum ist das relevant? Was bedeutet es? + +SUCHE NACH: +✅ Neue Studien/Reports mit konkreten Zahlen +✅ Unternehmens-Entscheidungen oder -Ankündigungen +✅ Marktveränderungen mit Daten +✅ Gesetzliche/Regulatorische Änderungen +✅ Kontroverse Aussagen von Branchenführern +✅ Überraschende Statistiken oder Trends +✅ Gescheiterte Projekte oder unerwartete Erfolge + +FORMAT DEINER ANTWORT: +Liefere 8-10 verschiedene Entwicklungen/News mit möglichst vielen Fakten und Zahlen. +Formatiere sie klar und strukturiert. 
+ +QUALITÄTSKRITERIEN: +✅ AKTUALITÄT: Von dieser Woche oder letzter Woche +✅ KONKRETHEIT: Echte Zahlen, Namen, Daten (nicht "Experten sagen...") +✅ VERIFIZIERBARKEIT: Echte Quelle die man prüfen kann +✅ BRANCHENRELEVANZ: Spezifisch für {industry} + +❌ VERMEIDE: +- Vage Aussagen ohne Daten ("KI wird wichtiger") +- Generische Trends ohne konkreten Aufhänger +- Alte News die jeder schon kennt +- Themen ohne verifizierbare Fakten""" + + def _get_transform_prompt( + self, + raw_research: str, + target_audience: str, + persona: str, + content_pillars: List[str], + example_posts: List[str], + existing_topics: List[str], + post_type: Any = None, + post_type_analysis: Dict[str, Any] = None + ) -> str: + """Transform raw research into personalized, concrete topic suggestions.""" + + # Build example posts section + examples_section = "" + if example_posts: + examples_section = "\n\n=== SO SCHREIBT DIESE PERSON (Beispiel-Posts) ===\n" + for i, post in enumerate(example_posts[:5], 1): + post_preview = post[:600] + "..." if len(post) > 600 else post + examples_section += f"\n--- Beispiel {i} ---\n{post_preview}\n" + examples_section += "--- Ende Beispiele ---\n" + + # Build pillars section + pillars_text = ", ".join(content_pillars[:5]) if content_pillars else "Keine spezifischen Säulen" + + # Build existing topics section (to avoid) + existing_text = ", ".join(existing_topics[:15]) if existing_topics else "Keine" + + # Build post type context section + post_type_section = "" + if post_type: + post_type_section = f""" + +=== ZIEL-POST-TYP: {post_type.name} === +{f"Beschreibung: {post_type.description}" if post_type.description else ""} +{f"Typische Hashtags: {', '.join(post_type.identifying_hashtags[:5])}" if post_type.identifying_hashtags else ""} +{f"Keywords: {', '.join(post_type.identifying_keywords[:10])}" if post_type.identifying_keywords else ""} +""" + if post_type.semantic_properties: + props = post_type.semantic_properties + if props.get("purpose"): + post_type_section += f"Zweck: {props['purpose']}\n" + if props.get("typical_tone"): + post_type_section += f"Tonalität: {props['typical_tone']}\n" + if props.get("target_audience"): + post_type_section += f"Zielgruppe: {props['target_audience']}\n" + + if post_type_analysis and post_type_analysis.get("sufficient_data"): + post_type_section += "\n**Analyse-basierte Anforderungen:**\n" + if hooks := post_type_analysis.get("hooks"): + post_type_section += f"- Hook-Typen: {', '.join(hooks.get('hook_types', [])[:3])}\n" + if content := post_type_analysis.get("content_focus"): + post_type_section += f"- Hauptthemen: {', '.join(content.get('main_themes', [])[:3])}\n" + if content.get("target_emotion"): + post_type_section += f"- Ziel-Emotion: {content['target_emotion']}\n" + + post_type_section += "\n**WICHTIG:** Alle Themenvorschläge müssen zu diesem Post-Typ passen!\n" + + return f"""AUFGABE: Transformiere die Recherche-Ergebnisse in KONKRETE, PERSONALISIERTE Themenvorschläge. +{post_type_section} + +=== RECHERCHE-ERGEBNISSE (Rohdaten) === +{raw_research} + +=== PERSON/EXPERTISE === +{persona[:800] if persona else "Keine Persona definiert"} + +=== CONTENT-SÄULEN DER PERSON === +{pillars_text} +{examples_section} +=== BEREITS BEHANDELT (NICHT NOCHMAL!) === +{existing_text} + +=== DEINE AUFGABE === + +Erstelle 6-8 KONKRETE Themenvorschläge die: +1. ZU DIESER PERSON PASSEN - Basierend auf Expertise und Beispiel-Posts +2. EINEN KLAREN ANGLE HABEN - Nicht "über X schreiben" sondern eine spezifische Perspektive +3. 
FAKTEN AUS DER RECHERCHE NUTZEN - Konkrete Daten/Zahlen einbauen +4. ABWECHSLUNGSREICH SIND - Verschiedene Kategorien und Formate + +KATEGORIEN (mindestens 3 verschiedene!): +- **Meinung/Take**: Deine Perspektive zu einem aktuellen Thema +- **Erfahrungsbericht**: "Was ich gelernt habe als..." +- **Konträr**: "Unpopuläre Meinung: ..." +- **How-To/Insight**: Konkrete Tipps basierend auf Daten +- **Story**: Persönliche Geschichte mit Business-Lesson +- **Analyse**: Daten/Trend analysiert durch deine Expertise-Brille + +FORMAT DER THEMENVORSCHLÄGE: + +{{ + "topics": [ + {{ + "title": "Konkreter Thementitel (kein Schlagzeilen-Stil!)", + "category": "Meinung/Take | Erfahrungsbericht | Konträr | How-To/Insight | Story | Analyse", + "angle": "Der spezifische Blickwinkel/die Perspektive für diesen Post", + "hook_idea": "Konkrete Hook-Idee die zum Post passen würde (1-2 Sätze)", + "key_facts": ["Fakt 1 aus der Recherche", "Fakt 2 mit Zahlen", "Fakt 3"], + "why_this_person": "Warum passt dieses Thema zu DIESER Person und ihrer Expertise?", + "source": "Quellenangabe" + }} + ] +}} + +BEISPIEL EINES GUTEN THEMENVORSCHLAGS: +{{ + "title": "Warum ich als Tech-Lead jetzt 30% meiner Zeit mit Prompt Engineering verbringe", + "category": "Erfahrungsbericht", + "angle": "Persönliche Erfahrung eines Tech-Leads mit der Veränderung seiner Rolle durch KI", + "hook_idea": "Vor einem Jahr habe ich Code geschrieben. Heute schreibe ich Prompts. Und ehrlich? Ich weiß noch nicht ob das gut oder schlecht ist.", + "key_facts": ["GitHub Copilot wird von 92% der Entwickler genutzt (Stack Overflow 2024)", "Durchschnittliche Zeitersparnis: 55%", "Aber: Code-Review-Zeit +40%"], + "why_this_person": "Als Tech-Lead hat die Person direkten Einblick in diese Veränderung und kann authentisch darüber berichten", + "source": "Stack Overflow Developer Survey 2024" +}} + +WICHTIG: +- Jeder Vorschlag muss sich UNTERSCHEIDEN (anderer Angle, andere Kategorie) +- Keine generischen "Die Zukunft von X" Themen +- Hook-Ideen müssen zum Stil der Beispiel-Posts passen! +- Key Facts müssen aus der Recherche stammen (keine erfundenen Zahlen)""" + + def _get_structure_prompt( + self, + raw_research: str, + target_audience: str, + persona: str = "" + ) -> str: + """Get prompt to structure Perplexity research into JSON (legacy).""" + return f"""Strukturiere die folgenden Recherche-Ergebnisse in ein sauberes JSON-Format. + +RECHERCHE-ERGEBNISSE: +{raw_research} + +AUFGABE: +Extrahiere die Themen und formatiere sie als JSON. Behalte ALLE Fakten, Quellen und Details bei. + +Gib das Ergebnis in diesem Format zurück: + +{{ + "topics": [ + {{ + "title": "Prägnanter Titel des Themas", + "category": "News-Flash / Schmerzpunkt-Löser / Konträrer Trend / Emerging Topic", + "fact": "Die kompletten Fakten, Zahlen und Details aus der Recherche - NICHTS weglassen!", + "relevance": "Warum ist das für {target_audience} wichtig?", + "source": "Quellenangaben aus der Recherche" + }} + ] +}} + +WICHTIG: +- Behalte ALLE Fakten und Quellen aus der Recherche +- Erfinde NICHTS dazu +- Wenn etwas unklar ist, lass es weg +- Mindestens 5 Themen wenn vorhanden""" + + def _ensure_diversity(self, topics: List[Dict[str, Any]]) -> List[Dict[str, Any]]: + """ + Ensure topic suggestions are diverse (different categories, angles). 
+ + Args: + topics: List of topic suggestions + + Returns: + Filtered list with diverse topics + """ + if len(topics) <= 3: + return topics + + # Track categories used + category_counts = {} + diverse_topics = [] + + for topic in topics: + category = topic.get("category", "Unknown") + + # Allow max 2 topics per category + if category_counts.get(category, 0) < 2: + diverse_topics.append(topic) + category_counts[category] = category_counts.get(category, 0) + 1 + + # If we filtered too many, add back some + if len(diverse_topics) < 5 and len(topics) >= 5: + for topic in topics: + if topic not in diverse_topics: + diverse_topics.append(topic) + if len(diverse_topics) >= 6: + break + + logger.info(f"Diversity check: {len(topics)} -> {len(diverse_topics)} topics, categories: {category_counts}") + return diverse_topics + + def _extract_topics_from_response(self, response: str) -> List[Dict[str, Any]]: + """ + Extract structured topics from Perplexity response. + + Args: + response: Raw response from Perplexity + + Returns: + List of structured topic dictionaries + """ + topics = [] + + # Simple parsing - split by topic markers + sections = response.split("[TITEL]:") + + for section in sections[1:]: # Skip first empty section + try: + # Extract title + title_end = section.find("[KATEGORIE]:") + if title_end == -1: + title_end = section.find("\n") + title = section[:title_end].strip() + + # Extract category + category = "" + if "[KATEGORIE]:" in section: + cat_start = section.find("[KATEGORIE]:") + len("[KATEGORIE]:") + cat_end = section.find("[DER FAKT]:") + if cat_end == -1: + cat_end = section.find("\n", cat_start) + category = section[cat_start:cat_end].strip() + + # Extract fact + fact = "" + if "[DER FAKT]:" in section: + fact_start = section.find("[DER FAKT]:") + len("[DER FAKT]:") + fact_end = section.find("[WARUM RELEVANT]:") + if fact_end == -1: + fact_end = section.find("[QUELLE]:") + if fact_end == -1: + fact_end = len(section) + fact = section[fact_start:fact_end].strip() + + # Extract relevance + relevance = "" + if "[WARUM RELEVANT]:" in section: + rel_start = section.find("[WARUM RELEVANT]:") + len("[WARUM RELEVANT]:") + rel_end = section.find("[QUELLE]:") + if rel_end == -1: + rel_end = len(section) + relevance = section[rel_start:rel_end].strip() + + if title and fact: + topics.append({ + "title": title, + "category": category or "Allgemein", + "fact": fact, + "relevance": relevance, + "source": "perplexity_research" + }) + except Exception as e: + logger.warning(f"Failed to parse topic section: {e}") + continue + + return topics diff --git a/src/agents/topic_extractor.py b/src/agents/topic_extractor.py new file mode 100644 index 0000000..67afd5a --- /dev/null +++ b/src/agents/topic_extractor.py @@ -0,0 +1,129 @@ +"""Topic extractor agent.""" +import json +from typing import List, Dict, Any +from loguru import logger + +from src.agents.base import BaseAgent +from src.database.models import LinkedInPost, Topic + + +class TopicExtractorAgent(BaseAgent): + """Agent for extracting topics from LinkedIn posts.""" + + def __init__(self): + """Initialize topic extractor agent.""" + super().__init__("TopicExtractor") + + async def process(self, posts: List[LinkedInPost], customer_id) -> List[Topic]: + """ + Extract topics from LinkedIn posts. 
+ + Args: + posts: List of LinkedIn posts + customer_id: Customer UUID (as UUID or string) + + Returns: + List of extracted topics + """ + logger.info(f"Extracting topics from {len(posts)} posts") + + # Prepare posts for analysis + posts_data = [] + for idx, post in enumerate(posts[:30]): # Analyze up to 30 posts + posts_data.append({ + "index": idx, + "post_id": str(post.id) if post.id else None, + "text": post.post_text[:500], # Limit text length + "date": str(post.post_date) if post.post_date else None + }) + + system_prompt = self._get_system_prompt() + user_prompt = self._get_user_prompt(posts_data) + + response = await self.call_openai( + system_prompt=system_prompt, + user_prompt=user_prompt, + model="gpt-4o", + temperature=0.3, + response_format={"type": "json_object"} + ) + + # Parse response + result = json.loads(response) + topics_data = result.get("topics", []) + + # Create Topic objects + topics = [] + for topic_data in topics_data: + # Get post index from topic_data if available + post_index = topic_data.get("post_id") + extracted_from_post_id = None + + # Map post index to actual post ID + if post_index is not None and isinstance(post_index, (int, str)): + try: + # Convert to int if it's a string representation + idx = int(post_index) if isinstance(post_index, str) else post_index + # Get the actual post from the posts list + if 0 <= idx < len(posts) and posts[idx].id: + extracted_from_post_id = posts[idx].id + except (ValueError, IndexError): + logger.warning(f"Could not map post index {post_index} to post ID") + + topic = Topic( + customer_id=customer_id, # Will be handled by Pydantic + title=topic_data["title"], + description=topic_data.get("description"), + category=topic_data.get("category"), + extracted_from_post_id=extracted_from_post_id, + extraction_confidence=topic_data.get("confidence", 0.8) + ) + topics.append(topic) + + logger.info(f"Extracted {len(topics)} topics") + return topics + + def _get_system_prompt(self) -> str: + """Get system prompt for topic extraction.""" + return """Du bist ein AI-Experte für Themenanalyse und Content-Kategorisierung. + +Deine Aufgabe ist es, aus einer Liste von LinkedIn-Posts die Hauptthemen zu extrahieren. + +Für jedes identifizierte Thema sollst du: +1. Einen prägnanten Titel geben +2. Eine kurze Beschreibung verfassen +3. Eine Kategorie zuweisen (z.B. "Technologie", "Strategie", "Personal Development", etc.) +4. Die Konfidenz angeben (0.0 - 1.0) + +Wichtig: +- Fasse ähnliche Themen zusammen (z.B. "KI im Marketing" und "AI-Tools" → "KI & Automatisierung") +- Identifiziere übergeordnete Themen-Cluster +- Sei präzise und konkret +- Vermeide zu allgemeine Themen wie "Business" oder "Erfolg" + +Gib deine Antwort als JSON zurück.""" + + def _get_user_prompt(self, posts_data: List[Dict[str, Any]]) -> str: + """Get user prompt with posts data.""" + posts_text = json.dumps(posts_data, indent=2, ensure_ascii=False) + + return f"""Analysiere folgende LinkedIn-Posts und extrahiere die Hauptthemen: + +{posts_text} + +Gib deine Analyse im folgenden JSON-Format zurück: + +{{ + "topics": [ + {{ + "title": "Thementitel", + "description": "Kurze Beschreibung des Themas", + "category": "Kategorie", + "post_id": "Index des repräsentativen Posts (optional)", + "confidence": 0.9, + "frequency": "Wie oft kommt das Thema vor?"
+ }} + ] +}} + +Extrahiere 5-10 Hauptthemen.""" diff --git a/src/agents/writer.py b/src/agents/writer.py new file mode 100644 index 0000000..2735f9e --- /dev/null +++ b/src/agents/writer.py @@ -0,0 +1,764 @@ +"""Writer agent for creating LinkedIn posts.""" +import asyncio +import json +import random +import re +from typing import Dict, Any, Optional, List +from loguru import logger + +from src.agents.base import BaseAgent +from src.config import settings + + +class WriterAgent(BaseAgent): + """Agent for writing LinkedIn posts based on profile analysis.""" + + def __init__(self): + """Initialize writer agent.""" + super().__init__("Writer") + + async def process( + self, + topic: Dict[str, Any], + profile_analysis: Dict[str, Any], + feedback: Optional[str] = None, + previous_version: Optional[str] = None, + example_posts: Optional[List[str]] = None, + critic_result: Optional[Dict[str, Any]] = None, + learned_lessons: Optional[Dict[str, Any]] = None, + post_type: Any = None, + post_type_analysis: Optional[Dict[str, Any]] = None + ) -> str: + """ + Write a LinkedIn post. + + Args: + topic: Topic dictionary with title, fact, relevance + profile_analysis: Profile analysis results + feedback: Optional feedback from critic (text summary) + previous_version: Optional previous version of the post + example_posts: Optional list of real posts from the customer to use as style reference + critic_result: Optional full critic result with specific_changes + learned_lessons: Optional lessons learned from past critic feedback + post_type: Optional PostType object for type-specific writing + post_type_analysis: Optional analysis of the post type + + Returns: + Written LinkedIn post + """ + if feedback and previous_version: + logger.info(f"Revising post based on critic feedback") + # For revisions, always use single draft (feedback is specific) + return await self._write_single_draft( + topic=topic, + profile_analysis=profile_analysis, + feedback=feedback, + previous_version=previous_version, + example_posts=example_posts, + critic_result=critic_result, + learned_lessons=learned_lessons, + post_type=post_type, + post_type_analysis=post_type_analysis + ) + else: + logger.info(f"Writing initial post for topic: {topic.get('title', 'Unknown')}") + if post_type: + logger.info(f"Using post type: {post_type.name}") + + # Select example posts - use semantic matching if enabled + selected_examples = self._select_example_posts(topic, example_posts, profile_analysis) + + # Use Multi-Draft if enabled for initial posts + if settings.writer_multi_draft_enabled: + return await self._write_multi_draft( + topic=topic, + profile_analysis=profile_analysis, + example_posts=selected_examples, + learned_lessons=learned_lessons, + post_type=post_type, + post_type_analysis=post_type_analysis + ) + else: + return await self._write_single_draft( + topic=topic, + profile_analysis=profile_analysis, + example_posts=selected_examples, + learned_lessons=learned_lessons, + post_type=post_type, + post_type_analysis=post_type_analysis + ) + + def _select_example_posts( + self, + topic: Dict[str, Any], + example_posts: Optional[List[str]], + profile_analysis: Dict[str, Any] + ) -> List[str]: + """ + Select example posts - either semantically similar or random. 
+ + Args: + topic: The topic to write about + example_posts: All available example posts + profile_analysis: Profile analysis results + + Returns: + Selected example posts (3-4 posts) + """ + if not example_posts or len(example_posts) == 0: + return [] + + if not settings.writer_semantic_matching_enabled: + # Fallback to random selection + num_examples = min(3, len(example_posts)) + selected = random.sample(example_posts, num_examples) + logger.info(f"Using {len(selected)} random example posts") + return selected + + # Semantic matching based on keywords + logger.info("Using semantic matching for example post selection") + + # Extract keywords from topic + topic_text = f"{topic.get('title', '')} {topic.get('fact', '')} {topic.get('category', '')}".lower() + topic_keywords = self._extract_keywords(topic_text) + + # Score each post by keyword overlap + scored_posts = [] + for post in example_posts: + post_lower = post.lower() + score = 0 + matched_keywords = [] + + for keyword in topic_keywords: + if keyword in post_lower: + score += 1 + matched_keywords.append(keyword) + + # Bonus for longer matches + score += len(matched_keywords) * 0.5 + + scored_posts.append({ + "post": post, + "score": score, + "matched": matched_keywords + }) + + # Sort by score (highest first) + scored_posts.sort(key=lambda x: x["score"], reverse=True) + + # Take top 2 by relevance + 1 random (for variety) + selected = [] + + # Top 2 most relevant + for item in scored_posts[:2]: + if item["score"] > 0: + selected.append(item["post"]) + logger.debug(f"Selected post (score {item['score']:.1f}, keywords: {item['matched'][:3]})") + + # Add 1 random post for variety (if not already selected) + remaining_posts = [p["post"] for p in scored_posts[2:] if p["post"] not in selected] + if remaining_posts and len(selected) < 3: + random_pick = random.choice(remaining_posts) + selected.append(random_pick) + logger.debug("Added 1 random post for variety") + + # If we still don't have enough, fill with top scored + while len(selected) < 3 and len(selected) < len(example_posts): + for item in scored_posts: + if item["post"] not in selected: + selected.append(item["post"]) + break + + logger.info(f"Selected {len(selected)} example posts via semantic matching") + return selected + + def _extract_keywords(self, text: str) -> List[str]: + """Extract meaningful keywords from text.""" + # Remove common stop words + stop_words = { + 'der', 'die', 'das', 'und', 'in', 'zu', 'den', 'von', 'für', 'mit', + 'auf', 'ist', 'im', 'sich', 'des', 'ein', 'eine', 'als', 'auch', + 'es', 'an', 'werden', 'aus', 'er', 'hat', 'dass', 'sie', 'nach', + 'wird', 'bei', 'einer', 'um', 'am', 'sind', 'noch', 'wie', 'einem', + 'über', 'so', 'zum', 'kann', 'nur', 'sein', 'ich', 'nicht', 'was', + 'oder', 'aber', 'wenn', 'ihre', 'man', 'the', 'and', 'to', 'of', + 'a', 'is', 'that', 'it', 'for', 'on', 'are', 'with', 'be', 'this', + 'was', 'have', 'from', 'your', 'you', 'we', 'our', 'mehr', 'neue', + 'neuen', 'können', 'durch', 'diese', 'dieser', 'einem', 'einen' + } + + # Split and clean + words = re.findall(r'\b[a-zäöüß]{3,}\b', text.lower()) + keywords = [w for w in words if w not in stop_words and len(w) >= 4] + + # Also extract compound words and important terms + important_terms = re.findall(r'\b[A-Z][a-zäöüß]+(?:[A-Z][a-zäöüß]+)*\b', text) + keywords.extend([t.lower() for t in important_terms if len(t) >= 4]) + + # Deduplicate while preserving order + seen = set() + unique_keywords = [] + for kw in keywords: + if kw not in seen: + seen.add(kw) + 
unique_keywords.append(kw) + + return unique_keywords[:15] # Limit to top 15 keywords + + async def _write_multi_draft( + self, + topic: Dict[str, Any], + profile_analysis: Dict[str, Any], + example_posts: List[str], + learned_lessons: Optional[Dict[str, Any]] = None, + post_type: Any = None, + post_type_analysis: Optional[Dict[str, Any]] = None + ) -> str: + """ + Generate multiple drafts and select the best one. + + Args: + topic: Topic to write about + profile_analysis: Profile analysis results + example_posts: Example posts for style reference + learned_lessons: Lessons learned from past feedback + post_type: Optional PostType object + post_type_analysis: Optional post type analysis + + Returns: + Best selected draft + """ + num_drafts = min(max(settings.writer_multi_draft_count, 2), 5) # Clamp between 2-5 + logger.info(f"Generating {num_drafts} drafts for selection") + + system_prompt = self._get_system_prompt(profile_analysis, example_posts, learned_lessons, post_type, post_type_analysis) + + # Generate drafts in parallel with different temperatures/approaches + draft_configs = [ + {"temperature": 0.5, "approach": "fokussiert"}, + {"temperature": 0.7, "approach": "kreativ"}, + {"temperature": 0.6, "approach": "ausgewogen"}, + {"temperature": 0.8, "approach": "experimentell"}, + {"temperature": 0.55, "approach": "präzise"}, + ][:num_drafts] + + # Create draft tasks + async def generate_draft(config: Dict, draft_num: int) -> Dict[str, Any]: + user_prompt = self._get_user_prompt_for_draft(topic, draft_num, config["approach"]) + try: + draft = await self.call_openai( + system_prompt=system_prompt, + user_prompt=user_prompt, + model="gpt-4o", + temperature=config["temperature"] + ) + return { + "draft_num": draft_num, + "content": draft.strip(), + "approach": config["approach"], + "temperature": config["temperature"] + } + except Exception as e: + logger.error(f"Failed to generate draft {draft_num}: {e}") + return None + + # Run drafts in parallel + tasks = [generate_draft(config, i + 1) for i, config in enumerate(draft_configs)] + results = await asyncio.gather(*tasks) + + # Filter out failed drafts + drafts = [r for r in results if r is not None] + + if not drafts: + raise ValueError("All draft generations failed") + + if len(drafts) == 1: + logger.warning("Only one draft succeeded, using it directly") + return drafts[0]["content"] + + logger.info(f"Generated {len(drafts)} drafts, now selecting best one") + + # Select the best draft + best_draft = await self._select_best_draft(drafts, topic, profile_analysis) + return best_draft + + def _get_user_prompt_for_draft( + self, + topic: Dict[str, Any], + draft_num: int, + approach: str + ) -> str: + """Get user prompt with slight variations for different drafts.""" + # Different emphasis for each draft + emphasis_variations = { + 1: "Fokussiere auf einen STARKEN, überraschenden Hook. Der erste Satz muss fesseln!", + 2: "Fokussiere auf STORYTELLING. Baue eine kleine Geschichte oder Anekdote ein.", + 3: "Fokussiere auf KONKRETEN MEHRWERT. Was lernt der Leser konkret?", + 4: "Fokussiere auf EMOTION. Sprich Gefühle und persönliche Erfahrungen an.", + 5: "Fokussiere auf PROVOKATION. 
Stelle eine These auf, die zum Nachdenken anregt.", + } + + emphasis = emphasis_variations.get(draft_num, emphasis_variations[1]) + + # Build enhanced topic section with new fields + angle_section = "" + if topic.get('angle'): + angle_section = f"\n**ANGLE/PERSPEKTIVE:**\n{topic.get('angle')}\n" + + hook_section = "" + if topic.get('hook_idea'): + hook_section = f"\n**HOOK-IDEE (als Inspiration):**\n\"{topic.get('hook_idea')}\"\n" + + facts_section = "" + key_facts = topic.get('key_facts', []) + if key_facts and isinstance(key_facts, list) and len(key_facts) > 0: + facts_section = "\n**KEY FACTS (nutze diese!):**\n" + "\n".join([f"- {f}" for f in key_facts]) + "\n" + + why_section = "" + if topic.get('why_this_person'): + why_section = f"\n**WARUM DU DARÜBER SCHREIBEN SOLLTEST:**\n{topic.get('why_this_person')}\n" + + return f"""Schreibe einen LinkedIn-Post zu folgendem Thema: + +**THEMA:** {topic.get('title', 'Unbekanntes Thema')} + +**KATEGORIE:** {topic.get('category', 'Allgemein')} +{angle_section}{hook_section} +**KERN-FAKT / INHALT:** +{topic.get('fact', topic.get('description', ''))} +{facts_section} +**WARUM RELEVANT:** +{topic.get('relevance', 'Aktuelles Thema für die Zielgruppe')} +{why_section} +**DEIN ANSATZ FÜR DIESEN ENTWURF ({approach}):** +{emphasis} + +**AUFGABE:** +Schreibe einen authentischen LinkedIn-Post, der: +1. Mit einem STARKEN, unerwarteten Hook beginnt (nutze die Hook-Idee als Inspiration, NICHT wörtlich!) +2. Den Fakt/das Thema aufgreift und Mehrwert bietet +3. Die Key Facts einbaut wo es passt +4. Eine persönliche Note oder Meinung enthält +5. Mit einem passenden CTA endet + +WICHTIG: +- Vermeide KI-typische Formulierungen ("In der heutigen Zeit", "Tauchen Sie ein", etc.) +- Schreibe natürlich und menschlich +- Der Post soll SOFORT 85+ Punkte im Review erreichen +- Die Hook-Idee ist nur INSPIRATION - mach etwas Eigenes daraus! + +Gib NUR den fertigen Post zurück.""" + + async def _select_best_draft( + self, + drafts: List[Dict[str, Any]], + topic: Dict[str, Any], + profile_analysis: Dict[str, Any] + ) -> str: + """ + Use AI to select the best draft. + + Args: + drafts: List of draft dictionaries + topic: The topic being written about + profile_analysis: Profile analysis for style reference + + Returns: + Content of the best draft + """ + # Build comparison prompt + drafts_text = "" + for draft in drafts: + drafts_text += f"\n\n=== ENTWURF {draft['draft_num']} ({draft['approach']}) ===\n" + drafts_text += draft["content"] + drafts_text += "\n=== ENDE ENTWURF ===" + + # Extract key style elements for comparison + writing_style = profile_analysis.get("writing_style", {}) + linguistic = profile_analysis.get("linguistic_fingerprint", {}) + phrase_library = profile_analysis.get("phrase_library", {}) + + selector_prompt = f"""Du bist ein erfahrener LinkedIn-Content-Editor. Wähle den BESTEN Entwurf aus. + +**THEMA DES POSTS:** +{topic.get('title', 'Unbekannt')} + +**STIL-ANFORDERUNGEN:** +- Tonalität: {writing_style.get('tone', 'Professionell')} +- Energie-Level: {linguistic.get('energy_level', 7)}/10 +- Ansprache: {writing_style.get('form_of_address', 'Du')} +- Typische Hook-Phrasen: {', '.join(phrase_library.get('hook_phrases', [])[:3])} + +**DIE ENTWÜRFE:** +{drafts_text} + +**BEWERTUNGSKRITERIEN:** +1. **Hook-Qualität (30%):** Wie aufmerksamkeitsstark ist der erste Satz? +2. **Stil-Match (25%):** Wie gut passt der Entwurf zum beschriebenen Stil? +3. **Mehrwert (25%):** Wie viel konkreten Nutzen bietet der Post? +4. 
**Natürlichkeit (20%):** Wie authentisch und menschlich klingt er? + +**AUFGABE:** +Analysiere jeden Entwurf kurz und wähle den besten. Antworte im JSON-Format: + +{{ + "analysis": [ + {{"draft": 1, "hook_score": 8, "style_score": 7, "value_score": 8, "natural_score": 7, "total": 30, "notes": "Kurze Begründung"}}, + ... + ], + "winner": 1, + "reason": "Kurze Begründung für die Wahl" +}}""" + + response = await self.call_openai( + system_prompt="Du bist ein Content-Editor, der LinkedIn-Posts bewertet und den besten auswählt.", + user_prompt=selector_prompt, + model="gpt-4o-mini", # Use cheaper model for selection + temperature=0.2, + response_format={"type": "json_object"} + ) + + try: + result = json.loads(response) + winner_num = result.get("winner", 1) + reason = result.get("reason", "") + + # Find the winning draft + winning_draft = next( + (d for d in drafts if d["draft_num"] == winner_num), + drafts[0] # Fallback to first draft + ) + + logger.info(f"Selected draft {winner_num} ({winning_draft['approach']}): {reason}") + return winning_draft["content"] + + except (json.JSONDecodeError, KeyError) as e: + logger.warning(f"Failed to parse selector response, using first draft: {e}") + return drafts[0]["content"] + + async def _write_single_draft( + self, + topic: Dict[str, Any], + profile_analysis: Dict[str, Any], + feedback: Optional[str] = None, + previous_version: Optional[str] = None, + example_posts: Optional[List[str]] = None, + critic_result: Optional[Dict[str, Any]] = None, + learned_lessons: Optional[Dict[str, Any]] = None, + post_type: Any = None, + post_type_analysis: Optional[Dict[str, Any]] = None + ) -> str: + """Write a single draft (original behavior).""" + # Select examples if not already selected + if example_posts is None: + example_posts = [] + + selected_examples = example_posts + if not feedback and not previous_version: + # Only select for initial posts, not revisions + if len(selected_examples) == 0: + pass # No examples available + elif len(selected_examples) > 3: + selected_examples = random.sample(selected_examples, 3) + + system_prompt = self._get_system_prompt(profile_analysis, selected_examples, learned_lessons, post_type, post_type_analysis) + user_prompt = self._get_user_prompt(topic, feedback, previous_version, critic_result) + + # Lower temperature for more consistent style matching + post = await self.call_openai( + system_prompt=system_prompt, + user_prompt=user_prompt, + model="gpt-4o", + temperature=0.6 + ) + + logger.info("Post written successfully") + return post.strip() + + def _get_system_prompt( + self, + profile_analysis: Dict[str, Any], + example_posts: List[str] = None, + learned_lessons: Optional[Dict[str, Any]] = None, + post_type: Any = None, + post_type_analysis: Optional[Dict[str, Any]] = None + ) -> str: + """Get system prompt for writer - orientiert an bewährten n8n-Prompts.""" + # Extract key profile information + writing_style = profile_analysis.get("writing_style", {}) + linguistic = profile_analysis.get("linguistic_fingerprint", {}) + tone_analysis = profile_analysis.get("tone_analysis", {}) + visual = profile_analysis.get("visual_patterns", {}) + content_strategy = profile_analysis.get("content_strategy", {}) + audience = profile_analysis.get("audience_insights", {}) + phrase_library = profile_analysis.get("phrase_library", {}) + structure_templates = profile_analysis.get("structure_templates", {}) + + # Build example posts section + examples_section = "" + if example_posts and len(example_posts) > 0: + examples_section = 
"\n\nREFERENZ-POSTS DER PERSON (Orientiere dich am Stil!):\n" + for i, post in enumerate(example_posts, 1): + post_text = post[:1800] + "..." if len(post) > 1800 else post + examples_section += f"\n--- Beispiel {i} ---\n{post_text}\n" + examples_section += "--- Ende Beispiele ---\n" + + # Safe extraction of nested values + emoji_list = visual.get('emoji_usage', {}).get('emojis', ['🚀']) + emoji_str = ' '.join(emoji_list) if isinstance(emoji_list, list) else str(emoji_list) + sig_phrases = linguistic.get('signature_phrases', []) + narrative_anchors = linguistic.get('narrative_anchors', []) + narrative_str = ', '.join(narrative_anchors) if narrative_anchors else 'Storytelling' + pain_points = audience.get('pain_points_addressed', []) + pain_points_str = ', '.join(pain_points) if pain_points else 'Branchenspezifische Herausforderungen' + + # Extract phrase library with variation instruction + hook_phrases = phrase_library.get('hook_phrases', []) + transition_phrases = phrase_library.get('transition_phrases', []) + emotional_expressions = phrase_library.get('emotional_expressions', []) + cta_phrases = phrase_library.get('cta_phrases', []) + filler_expressions = phrase_library.get('filler_expressions', []) + + # Randomly select a subset of phrases for this post (variation!) + def select_phrases(phrases: list, max_count: int = 3) -> str: + if not phrases: + return "Keine verfügbar" + selected = random.sample(phrases, min(max_count, len(phrases))) + return '\n - '.join(selected) + + # Extract structure templates + primary_structure = structure_templates.get('primary_structure', 'Hook → Body → CTA') + sentence_starters = structure_templates.get('typical_sentence_starters', []) + paragraph_transitions = structure_templates.get('paragraph_transitions', []) + + # Build phrase library section + phrase_section = "" + if hook_phrases or emotional_expressions or cta_phrases: + phrase_section = f""" + +2. PHRASEN-BIBLIOTHEK (Wähle passende aus - NICHT alle verwenden!): + +HOOK-VORLAGEN (lass dich inspirieren, kopiere nicht 1:1): + - {select_phrases(hook_phrases, 4)} + +ÜBERGANGS-PHRASEN (nutze 1-2 davon): + - {select_phrases(transition_phrases, 3)} + +EMOTIONALE AUSDRÜCKE (nutze 1-2 passende): + - {select_phrases(emotional_expressions, 4)} + +CTA-FORMULIERUNGEN (wähle eine passende): + - {select_phrases(cta_phrases, 3)} + +FÜLL-AUSDRÜCKE (für natürlichen Flow): + - {select_phrases(filler_expressions, 3)} + +SIGNATURE PHRASES (nutze maximal 1-2 ORGANISCH): + - {select_phrases(sig_phrases, 4)} + +WICHTIG: Variiere! Nutze NICHT immer die gleichen Phrasen. Wähle die, die zum Thema passen. +""" + + # Build structure section + structure_section = f""" + +3. STRUKTUR-TEMPLATE: + +Primäre Struktur: {primary_structure} + +Typische Satzanfänge (nutze ähnliche): + - {select_phrases(sentence_starters, 4)} + +Absatz-Übergänge: + - {select_phrases(paragraph_transitions, 3)} +""" + + # Build lessons learned section (from past feedback) + lessons_section = "" + if learned_lessons and learned_lessons.get("lessons"): + lessons_section = "\n\n6. 
LESSONS LEARNED (aus vergangenen Posts - BEACHTE DIESE!):\n" + patterns = learned_lessons.get("patterns", {}) + if patterns.get("posts_analyzed", 0) > 0: + lessons_section += f"\n(Basierend auf {patterns.get('posts_analyzed', 0)} analysierten Posts, Durchschnittsscore: {patterns.get('avg_score', 0):.0f}/100)\n" + + for lesson in learned_lessons["lessons"]: + if lesson["type"] == "critical": + lessons_section += f"\n⚠️ KRITISCH - {lesson['message']}\n" + for item in lesson["items"]: + lessons_section += f" ❌ {item}\n" + elif lesson["type"] == "recurring": + lessons_section += f"\n📝 {lesson['message']}\n" + for item in lesson["items"]: + lessons_section += f" • {item}\n" + + lessons_section += "\nBerücksichtige diese Punkte PROAKTIV beim Schreiben!" + + # Build post type section + post_type_section = "" + if post_type: + post_type_section = f""" + +7. POST-TYP SPEZIFISCH: {post_type.name} +{f"Beschreibung: {post_type.description}" if post_type.description else ""} +""" + if post_type_analysis and post_type_analysis.get("sufficient_data"): + # Use the PostTypeAnalyzerAgent's helper method to generate the section + from src.agents.post_type_analyzer import PostTypeAnalyzerAgent + analyzer = PostTypeAnalyzerAgent() + type_guidelines = analyzer.get_writing_prompt_section(post_type_analysis) + if type_guidelines: + post_type_section += f""" +=== POST-TYP ANALYSE & RICHTLINIEN === +{type_guidelines} +=== ENDE POST-TYP RICHTLINIEN === + +WICHTIG: Dieser Post MUSS den Mustern und Richtlinien dieses Post-Typs folgen! +""" + + return f"""ROLLE: Du bist ein erstklassiger Ghostwriter für LinkedIn. Deine Aufgabe ist es, einen Post zu schreiben, der exakt so klingt wie der digitale Zwilling der beschriebenen Person. Du passt dich zu 100% an das bereitgestellte Profil an. +{examples_section} + +1. STIL & ENERGIE: + +Energie-Level (1-10): {linguistic.get('energy_level', 7)} +(WICHTIG: Passe die Intensität und Leidenschaft des Textes EXAKT an diesen Wert an. Bei 9-10 = hochemotional, bei 5-6 = sachlich-professionell) + +Rhetorisches Shouting: {linguistic.get('shouting_usage', 'Dezent')} +(Nutze GROSSBUCHSTABEN für einzelne Wörter genau so wie hier beschrieben, um Emphase zu erzeugen - mach das für KEINE anderen Wörter!) + +Tonalität: {tone_analysis.get('primary_tone', 'Professionell und authentisch')} + +Ansprache (STRENGSTENS EINHALTEN): {writing_style.get('form_of_address', 'Du/Euch')} + +Perspektive (STRENGSTENS EINHALTEN): {writing_style.get('perspective', 'Ich-Perspektive')} + +Satz-Dynamik: {writing_style.get('sentence_dynamics', 'Mix aus kurzen und längeren Sätzen')} +Interpunktion: {linguistic.get('punctuation_patterns', 'Standard')} + +Branche: {audience.get('industry_context', 'Business')} + +Zielgruppe: {audience.get('target_audience', 'Professionals')} +{phrase_section} +{structure_section} + +4. VISUELLE REGELN: + +Unicode-Fettung: Nutze für den ersten Satz (Hook) fette Unicode-Zeichen (z.B. 𝗪𝗶𝗰𝗵𝘁𝗶𝗴𝗲𝗿 𝗦𝗮𝘁𝘇), sofern das zur Person passt: {visual.get('unicode_formatting', 'Fett für Hooks')} + +Emoji-Logik: Verwende diese Emojis: {emoji_str} +Platzierung: {visual.get('emoji_usage', {}).get('placement', 'Ende')} +Häufigkeit: {visual.get('emoji_usage', {}).get('frequency', 'Mittel')} + +Erzähl-Anker: Baue Elemente ein wie: {narrative_str} +(Falls 'PS-Zeilen', 'Dialoge' oder 'Flashbacks' genannt sind, integriere diese wenn es passt.) + +Layout: {visual.get('structure_preferences', 'Kurze Absätze, mobil-optimiert')} + +Länge: Ca. 
{writing_style.get('average_word_count', 300)} Wörter + +CTA: Beende den Post mit einer Variante von: {content_strategy.get('cta_style', 'Interaktive Frage an die Community')} + + +5. GUARDRAILS (VERBOTE!): + +Vermeide IMMER diese KI-typischen Muster: +- "In der heutigen Zeit", "Tauchen Sie ein", "Es ist kein Geheimnis" +- "Stellen Sie sich vor", "Lassen Sie uns", "Es ist wichtig zu verstehen" +- Gedankenstriche (–) zur Satzverbindung - nutze stattdessen Kommas oder Punkte +- Belehrende Formulierungen wenn die Person eine Ich-Perspektive nutzt +- Übertriebene Superlative ohne Substanz +- Zu perfekte, glatte Formulierungen - echte Menschen schreiben mit Ecken und Kanten +{lessons_section} +{post_type_section} +DEIN AUFTRAG: Schreibe den Post so, dass er für die Zielgruppe ({audience.get('target_audience', 'Professionals')}) einen klaren Mehrwert bietet und ihre Pain Points ({pain_points_str}) adressiert. Mach die Persönlichkeit des linguistischen Fingerabdrucks spürbar. + +Beginne DIREKT mit dem Hook. Keine einleitenden Sätze, kein "Hier ist der Post".""" + + def _get_user_prompt( + self, + topic: Dict[str, Any], + feedback: Optional[str] = None, + previous_version: Optional[str] = None, + critic_result: Optional[Dict[str, Any]] = None + ) -> str: + """Get user prompt for writer.""" + if feedback and previous_version: + # Build specific changes section + specific_changes_text = "" + if critic_result and critic_result.get("specific_changes"): + specific_changes_text = "\n**KONKRETE ÄNDERUNGEN (FÜHRE DIESE EXAKT DURCH!):**\n" + for i, change in enumerate(critic_result["specific_changes"], 1): + specific_changes_text += f"\n{i}. ERSETZE:\n" + specific_changes_text += f" \"{change.get('original', '')}\"\n" + specific_changes_text += f" MIT:\n" + specific_changes_text += f" \"{change.get('replacement', '')}\"\n" + if change.get('reason'): + specific_changes_text += f" (Grund: {change.get('reason')})\n" + + # Build improvements section + improvements_text = "" + if critic_result and critic_result.get("improvements"): + improvements_text = "\n**WEITERE VERBESSERUNGEN:**\n" + for imp in critic_result["improvements"]: + improvements_text += f"- {imp}\n" + + # Revision mode with structured feedback + return f"""ÜBERARBEITE den Post basierend auf dem Kritiker-Feedback. + +**VORHERIGE VERSION:** +{previous_version} + +**AKTUELLER SCORE:** {critic_result.get('overall_score', 'N/A')}/100 + +**FEEDBACK:** +{feedback} +{specific_changes_text} +{improvements_text} +**DEINE AUFGABE:** +1. Führe die konkreten Änderungen EXAKT durch +2. Behalte alles bei was GUT bewertet wurde +3. 
Der überarbeitete Post soll mindestens 85 Punkte erreichen + +Gib NUR den überarbeiteten Post zurück - keine Kommentare.""" + + else: + # Initial writing mode - enhanced with new topic fields + angle_section = "" + if topic.get('angle'): + angle_section = f"\n**ANGLE/PERSPEKTIVE:**\n{topic.get('angle')}\n" + + hook_section = "" + if topic.get('hook_idea'): + hook_section = f"\n**HOOK-IDEE (als Inspiration):**\n\"{topic.get('hook_idea')}\"\n" + + facts_section = "" + key_facts = topic.get('key_facts', []) + if key_facts and isinstance(key_facts, list) and len(key_facts) > 0: + facts_section = "\n**KEY FACTS (nutze diese!):**\n" + "\n".join([f"- {f}" for f in key_facts]) + "\n" + + return f"""Schreibe einen LinkedIn-Post zu folgendem Thema: + +**THEMA:** {topic.get('title', 'Unbekanntes Thema')} + +**KATEGORIE:** {topic.get('category', 'Allgemein')} +{angle_section}{hook_section} +**KERN-FAKT / INHALT:** +{topic.get('fact', topic.get('description', ''))} +{facts_section} +**WARUM RELEVANT:** +{topic.get('relevance', 'Aktuelles Thema für die Zielgruppe')} + +**AUFGABE:** +Schreibe einen authentischen LinkedIn-Post, der: +1. Mit einem STARKEN, unerwarteten Hook beginnt (nutze Hook-Idee als Inspiration!) +2. Den Fakt/das Thema aufgreift und Mehrwert bietet +3. Die Key Facts einbaut wo es passt +4. Eine persönliche Note oder Meinung enthält +5. Mit einem passenden CTA endet + +WICHTIG: +- Vermeide KI-typische Formulierungen ("In der heutigen Zeit", "Tauchen Sie ein", etc.) +- Schreibe natürlich und menschlich +- Der Post soll SOFORT 85+ Punkte im Review erreichen + +Gib NUR den fertigen Post zurück.""" diff --git a/src/config.py b/src/config.py new file mode 100644 index 0000000..40b112f --- /dev/null +++ b/src/config.py @@ -0,0 +1,57 @@ +"""Configuration management for LinkedIn Workflow.""" +from typing import Optional +from pydantic_settings import BaseSettings, SettingsConfigDict +from pathlib import Path + + +class Settings(BaseSettings): + """Application settings loaded from environment variables.""" + + # API Keys + openai_api_key: str + perplexity_api_key: str + apify_api_key: str + + # Supabase + supabase_url: str + supabase_key: str + + # Apify + apify_actor_id: str = "apimaestro~linkedin-profile-posts" + + # Web Interface + web_password: str = "" + session_secret: str = "" + + # Development + debug: bool = False + log_level: str = "INFO" + + # Email Settings + smtp_host: str = "" + smtp_port: int = 587 + smtp_user: str = "" + smtp_password: str = "" + smtp_from_name: str = "LinkedIn Post System" + email_default_recipient: str = "" + + # Writer Features (can be toggled to disable new features) + writer_multi_draft_enabled: bool = True # Generate multiple drafts and select best + writer_multi_draft_count: int = 3 # Number of drafts to generate (2-5) + writer_semantic_matching_enabled: bool = True # Use semantically similar example posts + writer_learn_from_feedback: bool = True # Learn from recurring critic feedback + writer_feedback_history_count: int = 10 # Number of past posts to analyze for patterns + + # User Frontend (LinkedIn OAuth via Supabase) + user_frontend_enabled: bool = True # Enable user frontend with LinkedIn OAuth + supabase_redirect_url: str = "" # OAuth Callback URL (e.g., https://linkedin.onyva.dev/auth/callback) + + model_config = SettingsConfigDict( + env_file=".env", + env_file_encoding="utf-8", + case_sensitive=False + ) + + +# Global settings instance +settings = Settings() diff --git a/src/database/__init__.py b/src/database/__init__.py new file mode 100644 
index 0000000..c66f4bf --- /dev/null +++ b/src/database/__init__.py @@ -0,0 +1,25 @@ +"""Database module.""" +from src.database.client import DatabaseClient, db +from src.database.models import ( + Customer, + LinkedInProfile, + LinkedInPost, + Topic, + ProfileAnalysis, + ResearchResult, + GeneratedPost, + PostType, +) + +__all__ = [ + "DatabaseClient", + "db", + "Customer", + "LinkedInProfile", + "LinkedInPost", + "Topic", + "ProfileAnalysis", + "ResearchResult", + "GeneratedPost", + "PostType", +] diff --git a/src/database/client.py b/src/database/client.py new file mode 100644 index 0000000..e8477c2 --- /dev/null +++ b/src/database/client.py @@ -0,0 +1,533 @@ +"""Supabase database client.""" +import asyncio +from typing import Optional, List, Dict, Any +from uuid import UUID +from supabase import create_client, Client +from loguru import logger + +from src.config import settings +from src.database.models import ( + Customer, LinkedInProfile, LinkedInPost, Topic, + ProfileAnalysis, ResearchResult, GeneratedPost, PostType +) + + +class DatabaseClient: + """Supabase database client wrapper.""" + + def __init__(self): + """Initialize Supabase client.""" + self.client: Client = create_client( + settings.supabase_url, + settings.supabase_key + ) + logger.info("Supabase client initialized") + + # ==================== CUSTOMERS ==================== + + async def create_customer(self, customer: Customer) -> Customer: + """Create a new customer.""" + data = customer.model_dump(exclude={"id", "created_at", "updated_at"}, exclude_none=True) + result = await asyncio.to_thread( + lambda: self.client.table("customers").insert(data).execute() + ) + logger.info(f"Created customer: {result.data[0]['id']}") + return Customer(**result.data[0]) + + async def get_customer(self, customer_id: UUID) -> Optional[Customer]: + """Get customer by ID.""" + result = await asyncio.to_thread( + lambda: self.client.table("customers").select("*").eq("id", str(customer_id)).execute() + ) + if result.data: + return Customer(**result.data[0]) + return None + + async def get_customer_by_linkedin(self, linkedin_url: str) -> Optional[Customer]: + """Get customer by LinkedIn URL.""" + result = await asyncio.to_thread( + lambda: self.client.table("customers").select("*").eq("linkedin_url", linkedin_url).execute() + ) + if result.data: + return Customer(**result.data[0]) + return None + + async def list_customers(self) -> List[Customer]: + """List all customers.""" + result = await asyncio.to_thread( + lambda: self.client.table("customers").select("*").execute() + ) + return [Customer(**item) for item in result.data] + + # ==================== LINKEDIN PROFILES ==================== + + async def save_linkedin_profile(self, profile: LinkedInProfile) -> LinkedInProfile: + """Save or update LinkedIn profile.""" + data = profile.model_dump(exclude={"id", "scraped_at"}, exclude_none=True) + # Convert UUID to string for Supabase + if "customer_id" in data: + data["customer_id"] = str(data["customer_id"]) + + # Check if profile exists + existing = await asyncio.to_thread( + lambda: self.client.table("linkedin_profiles").select("*").eq( + "customer_id", str(profile.customer_id) + ).execute() + ) + + if existing.data: + # Update existing + result = await asyncio.to_thread( + lambda: self.client.table("linkedin_profiles").update(data).eq( + "customer_id", str(profile.customer_id) + ).execute() + ) + else: + # Insert new + result = await asyncio.to_thread( + lambda: self.client.table("linkedin_profiles").insert(data).execute() + ) + + 
logger.info(f"Saved LinkedIn profile for customer: {profile.customer_id}") + return LinkedInProfile(**result.data[0]) + + async def get_linkedin_profile(self, customer_id: UUID) -> Optional[LinkedInProfile]: + """Get LinkedIn profile for customer.""" + result = await asyncio.to_thread( + lambda: self.client.table("linkedin_profiles").select("*").eq( + "customer_id", str(customer_id) + ).execute() + ) + if result.data: + return LinkedInProfile(**result.data[0]) + return None + + # ==================== LINKEDIN POSTS ==================== + + async def save_linkedin_posts(self, posts: List[LinkedInPost]) -> List[LinkedInPost]: + """Save LinkedIn posts (bulk).""" + from datetime import datetime + + # Deduplicate posts based on (customer_id, post_url) before saving + seen = set() + unique_posts = [] + for p in posts: + key = (str(p.customer_id), p.post_url) + if key not in seen: + seen.add(key) + unique_posts.append(p) + + if len(posts) != len(unique_posts): + logger.warning(f"Removed {len(posts) - len(unique_posts)} duplicate posts from batch") + + data = [] + for p in unique_posts: + post_dict = p.model_dump(exclude={"id", "scraped_at"}, exclude_none=True) + # Convert UUID to string for Supabase + if "customer_id" in post_dict: + post_dict["customer_id"] = str(post_dict["customer_id"]) + + # Convert datetime to ISO string for Supabase + if "post_date" in post_dict and isinstance(post_dict["post_date"], datetime): + post_dict["post_date"] = post_dict["post_date"].isoformat() + + data.append(post_dict) + + if not data: + logger.warning("No posts to save") + return [] + + # Use upsert with on_conflict to handle duplicates based on (customer_id, post_url) + # This will update existing posts instead of throwing an error + result = await asyncio.to_thread( + lambda: self.client.table("linkedin_posts").upsert( + data, + on_conflict="customer_id,post_url" + ).execute() + ) + logger.info(f"Saved {len(result.data)} LinkedIn posts") + return [LinkedInPost(**item) for item in result.data] + + async def get_linkedin_posts(self, customer_id: UUID) -> List[LinkedInPost]: + """Get all LinkedIn posts for customer.""" + result = await asyncio.to_thread( + lambda: self.client.table("linkedin_posts").select("*").eq( + "customer_id", str(customer_id) + ).order("post_date", desc=True).execute() + ) + return [LinkedInPost(**item) for item in result.data] + + async def get_unclassified_posts(self, customer_id: UUID) -> List[LinkedInPost]: + """Get all LinkedIn posts without a post_type_id.""" + result = await asyncio.to_thread( + lambda: self.client.table("linkedin_posts").select("*").eq( + "customer_id", str(customer_id) + ).is_("post_type_id", "null").execute() + ) + return [LinkedInPost(**item) for item in result.data] + + async def get_posts_by_type(self, customer_id: UUID, post_type_id: UUID) -> List[LinkedInPost]: + """Get all LinkedIn posts for a specific post type.""" + result = await asyncio.to_thread( + lambda: self.client.table("linkedin_posts").select("*").eq( + "customer_id", str(customer_id) + ).eq("post_type_id", str(post_type_id)).order("post_date", desc=True).execute() + ) + return [LinkedInPost(**item) for item in result.data] + + async def update_post_classification( + self, + post_id: UUID, + post_type_id: UUID, + classification_method: str, + classification_confidence: float + ) -> None: + """Update a single post's classification.""" + await asyncio.to_thread( + lambda: self.client.table("linkedin_posts").update({ + "post_type_id": str(post_type_id), + "classification_method": 
classification_method, + "classification_confidence": classification_confidence + }).eq("id", str(post_id)).execute() + ) + logger.debug(f"Updated classification for post {post_id}") + + async def update_posts_classification_bulk( + self, + classifications: List[Dict[str, Any]] + ) -> int: + """ + Bulk update post classifications. + + Args: + classifications: List of dicts with post_id, post_type_id, classification_method, classification_confidence + + Returns: + Number of posts updated + """ + count = 0 + for classification in classifications: + try: + await asyncio.to_thread( + lambda c=classification: self.client.table("linkedin_posts").update({ + "post_type_id": str(c["post_type_id"]), + "classification_method": c["classification_method"], + "classification_confidence": c["classification_confidence"] + }).eq("id", str(c["post_id"])).execute() + ) + count += 1 + except Exception as e: + logger.warning(f"Failed to update classification for post {classification['post_id']}: {e}") + logger.info(f"Bulk updated classifications for {count} posts") + return count + + # ==================== POST TYPES ==================== + + async def create_post_type(self, post_type: PostType) -> PostType: + """Create a new post type.""" + data = post_type.model_dump(exclude={"id", "created_at", "updated_at"}, exclude_none=True) + # Convert UUID to string + if "customer_id" in data: + data["customer_id"] = str(data["customer_id"]) + + result = await asyncio.to_thread( + lambda: self.client.table("post_types").insert(data).execute() + ) + logger.info(f"Created post type: {result.data[0]['name']}") + return PostType(**result.data[0]) + + async def create_post_types_bulk(self, post_types: List[PostType]) -> List[PostType]: + """Create multiple post types at once.""" + if not post_types: + return [] + + data = [] + for pt in post_types: + pt_dict = pt.model_dump(exclude={"id", "created_at", "updated_at"}, exclude_none=True) + if "customer_id" in pt_dict: + pt_dict["customer_id"] = str(pt_dict["customer_id"]) + data.append(pt_dict) + + result = await asyncio.to_thread( + lambda: self.client.table("post_types").insert(data).execute() + ) + logger.info(f"Created {len(result.data)} post types") + return [PostType(**item) for item in result.data] + + async def get_post_types(self, customer_id: UUID, active_only: bool = True) -> List[PostType]: + """Get all post types for a customer.""" + def _query(): + query = self.client.table("post_types").select("*").eq("customer_id", str(customer_id)) + if active_only: + query = query.eq("is_active", True) + return query.order("name").execute() + + result = await asyncio.to_thread(_query) + return [PostType(**item) for item in result.data] + + async def get_post_type(self, post_type_id: UUID) -> Optional[PostType]: + """Get a single post type by ID.""" + result = await asyncio.to_thread( + lambda: self.client.table("post_types").select("*").eq( + "id", str(post_type_id) + ).execute() + ) + if result.data: + return PostType(**result.data[0]) + return None + + async def update_post_type(self, post_type_id: UUID, updates: Dict[str, Any]) -> PostType: + """Update a post type.""" + result = await asyncio.to_thread( + lambda: self.client.table("post_types").update(updates).eq( + "id", str(post_type_id) + ).execute() + ) + logger.info(f"Updated post type: {post_type_id}") + return PostType(**result.data[0]) + + async def update_post_type_analysis( + self, + post_type_id: UUID, + analysis: Dict[str, Any], + analyzed_post_count: int + ) -> PostType: + """Update the analysis for a post 
type.""" + from datetime import datetime + result = await asyncio.to_thread( + lambda: self.client.table("post_types").update({ + "analysis": analysis, + "analysis_generated_at": datetime.now().isoformat(), + "analyzed_post_count": analyzed_post_count + }).eq("id", str(post_type_id)).execute() + ) + logger.info(f"Updated analysis for post type: {post_type_id}") + return PostType(**result.data[0]) + + async def delete_post_type(self, post_type_id: UUID, soft: bool = True) -> None: + """Delete a post type (soft delete by default).""" + if soft: + await asyncio.to_thread( + lambda: self.client.table("post_types").update({ + "is_active": False + }).eq("id", str(post_type_id)).execute() + ) + logger.info(f"Soft deleted post type: {post_type_id}") + else: + await asyncio.to_thread( + lambda: self.client.table("post_types").delete().eq( + "id", str(post_type_id) + ).execute() + ) + logger.info(f"Hard deleted post type: {post_type_id}") + + # ==================== TOPICS ==================== + + async def save_topics(self, topics: List[Topic]) -> List[Topic]: + """Save extracted topics.""" + if not topics: + logger.warning("No topics to save") + return [] + + data = [] + for t in topics: + topic_dict = t.model_dump(exclude={"id", "created_at"}, exclude_none=True) + # Convert UUID to string for Supabase + if "customer_id" in topic_dict: + topic_dict["customer_id"] = str(topic_dict["customer_id"]) + if "extracted_from_post_id" in topic_dict and topic_dict["extracted_from_post_id"]: + topic_dict["extracted_from_post_id"] = str(topic_dict["extracted_from_post_id"]) + if "target_post_type_id" in topic_dict and topic_dict["target_post_type_id"]: + topic_dict["target_post_type_id"] = str(topic_dict["target_post_type_id"]) + data.append(topic_dict) + + try: + # Use insert and handle duplicates manually + result = await asyncio.to_thread( + lambda: self.client.table("topics").insert(data).execute() + ) + logger.info(f"Saved {len(result.data)} topics to database") + return [Topic(**item) for item in result.data] + except Exception as e: + logger.error(f"Error saving topics: {e}", exc_info=True) + # Try one by one if batch fails + saved = [] + for topic_data in data: + try: + result = await asyncio.to_thread( + lambda td=topic_data: self.client.table("topics").insert(td).execute() + ) + saved.extend([Topic(**item) for item in result.data]) + except Exception as single_error: + logger.warning(f"Skipped duplicate topic: {topic_data.get('title')}") + logger.info(f"Saved {len(saved)} topics individually") + return saved + + async def get_topics( + self, + customer_id: UUID, + unused_only: bool = False, + post_type_id: Optional[UUID] = None + ) -> List[Topic]: + """Get topics for customer, optionally filtered by post type.""" + def _query(): + query = self.client.table("topics").select("*").eq("customer_id", str(customer_id)) + if unused_only: + query = query.eq("is_used", False) + if post_type_id: + query = query.eq("target_post_type_id", str(post_type_id)) + return query.order("created_at", desc=True).execute() + + result = await asyncio.to_thread(_query) + return [Topic(**item) for item in result.data] + + async def mark_topic_used(self, topic_id: UUID) -> None: + """Mark topic as used.""" + await asyncio.to_thread( + lambda: self.client.table("topics").update({ + "is_used": True, + "used_at": "now()" + }).eq("id", str(topic_id)).execute() + ) + logger.info(f"Marked topic {topic_id} as used") + + # ==================== PROFILE ANALYSIS ==================== + + async def save_profile_analysis(self, analysis: 
ProfileAnalysis) -> ProfileAnalysis: + """Save profile analysis.""" + data = analysis.model_dump(exclude={"id", "created_at"}, exclude_none=True) + # Convert UUID to string for Supabase + if "customer_id" in data: + data["customer_id"] = str(data["customer_id"]) + + # Check if analysis exists + existing = await asyncio.to_thread( + lambda: self.client.table("profile_analyses").select("*").eq( + "customer_id", str(analysis.customer_id) + ).execute() + ) + + if existing.data: + # Update existing + result = await asyncio.to_thread( + lambda: self.client.table("profile_analyses").update(data).eq( + "customer_id", str(analysis.customer_id) + ).execute() + ) + else: + # Insert new + result = await asyncio.to_thread( + lambda: self.client.table("profile_analyses").insert(data).execute() + ) + + logger.info(f"Saved profile analysis for customer: {analysis.customer_id}") + return ProfileAnalysis(**result.data[0]) + + async def get_profile_analysis(self, customer_id: UUID) -> Optional[ProfileAnalysis]: + """Get profile analysis for customer.""" + result = await asyncio.to_thread( + lambda: self.client.table("profile_analyses").select("*").eq( + "customer_id", str(customer_id) + ).execute() + ) + if result.data: + return ProfileAnalysis(**result.data[0]) + return None + + # ==================== RESEARCH RESULTS ==================== + + async def save_research_result(self, research: ResearchResult) -> ResearchResult: + """Save research result.""" + data = research.model_dump(exclude={"id", "created_at"}, exclude_none=True) + # Convert UUIDs to string for Supabase + if "customer_id" in data: + data["customer_id"] = str(data["customer_id"]) + if "target_post_type_id" in data and data["target_post_type_id"]: + data["target_post_type_id"] = str(data["target_post_type_id"]) + + result = await asyncio.to_thread( + lambda: self.client.table("research_results").insert(data).execute() + ) + logger.info(f"Saved research result for customer: {research.customer_id}") + return ResearchResult(**result.data[0]) + + async def get_latest_research(self, customer_id: UUID) -> Optional[ResearchResult]: + """Get latest research result for customer.""" + result = await asyncio.to_thread( + lambda: self.client.table("research_results").select("*").eq( + "customer_id", str(customer_id) + ).order("created_at", desc=True).limit(1).execute() + ) + if result.data: + return ResearchResult(**result.data[0]) + return None + + async def get_all_research( + self, + customer_id: UUID, + post_type_id: Optional[UUID] = None + ) -> List[ResearchResult]: + """Get all research results for customer, optionally filtered by post type.""" + def _query(): + query = self.client.table("research_results").select("*").eq( + "customer_id", str(customer_id) + ) + if post_type_id: + query = query.eq("target_post_type_id", str(post_type_id)) + return query.order("created_at", desc=True).execute() + + result = await asyncio.to_thread(_query) + return [ResearchResult(**item) for item in result.data] + + # ==================== GENERATED POSTS ==================== + + async def save_generated_post(self, post: GeneratedPost) -> GeneratedPost: + """Save generated post.""" + data = post.model_dump(exclude={"id", "created_at"}, exclude_none=True) + # Convert UUIDs to string for Supabase + if "customer_id" in data: + data["customer_id"] = str(data["customer_id"]) + if "topic_id" in data and data["topic_id"]: + data["topic_id"] = str(data["topic_id"]) + if "post_type_id" in data and data["post_type_id"]: + data["post_type_id"] = str(data["post_type_id"]) + + 
result = await asyncio.to_thread( + lambda: self.client.table("generated_posts").insert(data).execute() + ) + logger.info(f"Saved generated post: {result.data[0]['id']}") + return GeneratedPost(**result.data[0]) + + async def update_generated_post(self, post_id: UUID, updates: Dict[str, Any]) -> GeneratedPost: + """Update generated post.""" + result = await asyncio.to_thread( + lambda: self.client.table("generated_posts").update(updates).eq( + "id", str(post_id) + ).execute() + ) + logger.info(f"Updated generated post: {post_id}") + return GeneratedPost(**result.data[0]) + + async def get_generated_posts(self, customer_id: UUID) -> List[GeneratedPost]: + """Get all generated posts for customer.""" + result = await asyncio.to_thread( + lambda: self.client.table("generated_posts").select("*").eq( + "customer_id", str(customer_id) + ).order("created_at", desc=True).execute() + ) + return [GeneratedPost(**item) for item in result.data] + + async def get_generated_post(self, post_id: UUID) -> Optional[GeneratedPost]: + """Get a single generated post by ID.""" + result = await asyncio.to_thread( + lambda: self.client.table("generated_posts").select("*").eq( + "id", str(post_id) + ).execute() + ) + if result.data: + return GeneratedPost(**result.data[0]) + return None + + +# Global database client instance +db = DatabaseClient() diff --git a/src/database/models.py b/src/database/models.py new file mode 100644 index 0000000..c0e32ae --- /dev/null +++ b/src/database/models.py @@ -0,0 +1,126 @@ +"""Pydantic models for database entities.""" +from datetime import datetime +from typing import Optional, Dict, Any, List +from uuid import UUID +from pydantic import BaseModel, Field, ConfigDict + + +class DBModel(BaseModel): + """Base model for database entities with extra fields ignored.""" + model_config = ConfigDict(extra='ignore') + + +class Customer(DBModel): + """Customer/Client model.""" + id: Optional[UUID] = None + created_at: Optional[datetime] = None + updated_at: Optional[datetime] = None + name: str + email: Optional[str] = None + company_name: Optional[str] = None + linkedin_url: str + metadata: Dict[str, Any] = Field(default_factory=dict) + + +class PostType(DBModel): + """Post type model for categorizing different types of posts.""" + id: Optional[UUID] = None + customer_id: UUID + created_at: Optional[datetime] = None + updated_at: Optional[datetime] = None + name: str + description: Optional[str] = None + identifying_hashtags: List[str] = Field(default_factory=list) + identifying_keywords: List[str] = Field(default_factory=list) + semantic_properties: Dict[str, Any] = Field(default_factory=dict) + analysis: Optional[Dict[str, Any]] = None + analysis_generated_at: Optional[datetime] = None + analyzed_post_count: int = 0 + is_active: bool = True + + +class LinkedInProfile(DBModel): + """LinkedIn profile model.""" + id: Optional[UUID] = None + customer_id: UUID + scraped_at: Optional[datetime] = None + profile_data: Dict[str, Any] + name: Optional[str] = None + headline: Optional[str] = None + summary: Optional[str] = None + location: Optional[str] = None + industry: Optional[str] = None + + +class LinkedInPost(DBModel): + """LinkedIn post model.""" + id: Optional[UUID] = None + customer_id: UUID + scraped_at: Optional[datetime] = None + post_url: Optional[str] = None + post_text: str + post_date: Optional[datetime] = None + likes: int = 0 + comments: int = 0 + shares: int = 0 + raw_data: Optional[Dict[str, Any]] = None + # Post type classification fields + post_type_id: Optional[UUID] = 
None + classification_method: Optional[str] = None # 'hashtag', 'keyword', 'semantic' + classification_confidence: Optional[float] = None + + +class Topic(DBModel): + """Topic model.""" + id: Optional[UUID] = None + customer_id: UUID + created_at: Optional[datetime] = None + title: str + description: Optional[str] = None + category: Optional[str] = None + extracted_from_post_id: Optional[UUID] = None + extraction_confidence: Optional[float] = None + is_used: bool = False + used_at: Optional[datetime] = None + target_post_type_id: Optional[UUID] = None # Target post type for this topic + + +class ProfileAnalysis(DBModel): + """Profile analysis model.""" + id: Optional[UUID] = None + customer_id: UUID + created_at: Optional[datetime] = None + writing_style: Dict[str, Any] + tone_analysis: Dict[str, Any] + topic_patterns: Dict[str, Any] + audience_insights: Dict[str, Any] + full_analysis: Dict[str, Any] + + +class ResearchResult(DBModel): + """Research result model.""" + id: Optional[UUID] = None + customer_id: UUID + created_at: Optional[datetime] = None + query: str + results: Dict[str, Any] + suggested_topics: List[Dict[str, Any]] + source: str = "perplexity" + target_post_type_id: Optional[UUID] = None # Target post type for this research + + +class GeneratedPost(DBModel): + """Generated post model.""" + id: Optional[UUID] = None + customer_id: UUID + created_at: Optional[datetime] = None + topic_id: Optional[UUID] = None + topic_title: str + post_content: str + iterations: int = 0 + writer_versions: List[str] = Field(default_factory=list) + critic_feedback: List[Dict[str, Any]] = Field(default_factory=list) + status: str = "draft" # draft, approved, published, rejected + approved_at: Optional[datetime] = None + published_at: Optional[datetime] = None + post_type_id: Optional[UUID] = None # Post type used for this generated post diff --git a/src/email_service.py b/src/email_service.py new file mode 100644 index 0000000..b6b4319 --- /dev/null +++ b/src/email_service.py @@ -0,0 +1,144 @@ +"""Email service for sending posts via email.""" +import base64 +import html +import smtplib +import ssl +from email.mime.text import MIMEText +from email.mime.multipart import MIMEMultipart +from pathlib import Path +from typing import Optional +from loguru import logger + +from src.config import settings + + +def _load_logo_base64() -> str: + """Load and encode the logo as base64.""" + logo_path = Path(__file__).parent / "web" / "static" / "logo.png" + if logo_path.exists(): + with open(logo_path, "rb") as f: + return base64.b64encode(f.read()).decode("utf-8") + return "" + + +# Pre-load logo at module import +_LOGO_BASE64 = _load_logo_base64() + + +class EmailService: + """Service for sending emails.""" + + def __init__(self): + """Initialize email service.""" + self.host = settings.smtp_host + self.port = settings.smtp_port + self.user = settings.smtp_user + self.password = settings.smtp_password + self.from_name = settings.smtp_from_name + + def is_configured(self) -> bool: + """Check if email is properly configured.""" + return bool(self.host and self.user and self.password) + + def send_post( + self, + recipient: str, + post_content: str, + topic_title: str, + customer_name: str, + score: Optional[int] = None + ) -> bool: + """ + Send a post via email. 
+
+        Args:
+            recipient: Email address to send to
+            post_content: The post content
+            topic_title: Title of the topic
+            customer_name: Name of the customer
+            score: Optional critic score
+
+        Returns:
+            True if sent successfully, False otherwise
+        """
+        if not self.is_configured():
+            logger.error("Email not configured. Set SMTP_HOST, SMTP_USER, and SMTP_PASSWORD.")
+            return False
+
+        try:
+            # Create message
+            msg = MIMEMultipart("alternative")
+            msg["Subject"] = f"Dein LinkedIn Post: {topic_title}"
+            msg["From"] = f"onyva <{self.user}>"
+            msg["To"] = recipient
+
+            # Plain text version - just the post
+            text_content = f"""{post_content}
+
+--
+onyva"""
+
+            # HTML version - minimal, just post + onyva logo (embedded as base64 data URI)
+            logo_html = ""
+            if _LOGO_BASE64:
+                logo_html = f'<img src="data:image/png;base64,{_LOGO_BASE64}" alt="onyva" height="32">'
+            else:
+                # Fallback if logo not found
+                logo_html = '<strong>onyva</strong>'
+
+            # Convert newlines to <br> for email client compatibility
+            post_html = html.escape(post_content).replace('\n', '<br>\n')
+
+            html_content = f"""
+<html>
+<body style="font-family: Arial, Helvetica, sans-serif; color: #222222;">
+<div style="max-width: 600px; margin: 0 auto; padding: 24px;">
+<p style="font-size: 15px; line-height: 1.5;">
+{post_html}
+</p>
+<div style="margin-top: 32px;">
+{logo_html}
+</div>
+</div>
+</body>
+</html>
+ + +""" + + # Attach both versions + msg.attach(MIMEText(text_content, "plain", "utf-8")) + msg.attach(MIMEText(html_content, "html", "utf-8")) + + # Send email + context = ssl.create_default_context() + + with smtplib.SMTP(self.host, self.port) as server: + server.ehlo() + server.starttls(context=context) + server.ehlo() + server.login(self.user, self.password) + server.sendmail(self.user, recipient, msg.as_string()) + + logger.info(f"Email sent successfully to {recipient}") + return True + + except smtplib.SMTPAuthenticationError as e: + logger.error(f"SMTP Authentication failed: {e}") + return False + except smtplib.SMTPException as e: + logger.error(f"SMTP error: {e}") + return False + except Exception as e: + logger.error(f"Failed to send email: {e}") + return False + + +# Global email service instance +email_service = EmailService() diff --git a/src/orchestrator.py b/src/orchestrator.py new file mode 100644 index 0000000..b0ba52d --- /dev/null +++ b/src/orchestrator.py @@ -0,0 +1,743 @@ +"""Main orchestrator for the LinkedIn workflow.""" +from collections import Counter +from typing import Dict, Any, List, Optional, Callable +from uuid import UUID +from loguru import logger + +from src.config import settings +from src.database import db, Customer, LinkedInProfile, LinkedInPost, Topic +from src.scraper import scraper +from src.agents import ( + ProfileAnalyzerAgent, + TopicExtractorAgent, + ResearchAgent, + WriterAgent, + CriticAgent, + PostClassifierAgent, + PostTypeAnalyzerAgent, +) +from src.database.models import PostType + + +class WorkflowOrchestrator: + """Orchestrates the entire LinkedIn post creation workflow.""" + + def __init__(self): + """Initialize orchestrator with all agents.""" + self.profile_analyzer = ProfileAnalyzerAgent() + self.topic_extractor = TopicExtractorAgent() + self.researcher = ResearchAgent() + self.writer = WriterAgent() + self.critic = CriticAgent() + self.post_classifier = PostClassifierAgent() + self.post_type_analyzer = PostTypeAnalyzerAgent() + logger.info("WorkflowOrchestrator initialized") + + async def run_initial_setup( + self, + linkedin_url: str, + customer_name: str, + customer_data: Dict[str, Any], + post_types_data: Optional[List[Dict[str, Any]]] = None + ) -> Customer: + """ + Run initial setup for a new customer. + + This includes: + 1. Creating customer record + 2. Creating post types (if provided) + 3. Scraping LinkedIn posts (NO profile scraping) + 4. Creating profile from customer_data + 5. Analyzing profile + 6. Extracting topics from existing posts + 7. Classifying posts by type (if post types exist) + 8. Analyzing post types (if enough posts) + + Args: + linkedin_url: LinkedIn profile URL + customer_name: Customer name + customer_data: Complete customer data (company, persona, style_guide, etc.) 
+ post_types_data: Optional list of post type definitions + + Returns: + Customer object + """ + logger.info(f"=== INITIAL SETUP for {customer_name} ===") + + # Step 1: Check if customer already exists + existing_customer = await db.get_customer_by_linkedin(linkedin_url) + if existing_customer: + logger.warning(f"Customer already exists: {existing_customer.id}") + return existing_customer + + # Step 2: Create customer + total_steps = 7 if post_types_data else 5 + logger.info(f"Step 1/{total_steps}: Creating customer record") + customer = Customer( + name=customer_name, + linkedin_url=linkedin_url, + company_name=customer_data.get("company_name"), + email=customer_data.get("email"), + metadata=customer_data + ) + customer = await db.create_customer(customer) + logger.info(f"Customer created: {customer.id}") + + # Step 2.5: Create post types if provided + created_post_types = [] + if post_types_data: + logger.info(f"Step 2/{total_steps}: Creating {len(post_types_data)} post types") + for pt_data in post_types_data: + post_type = PostType( + customer_id=customer.id, + name=pt_data.get("name", "Unnamed"), + description=pt_data.get("description"), + identifying_hashtags=pt_data.get("identifying_hashtags", []), + identifying_keywords=pt_data.get("identifying_keywords", []), + semantic_properties=pt_data.get("semantic_properties", {}) + ) + created_post_types.append(post_type) + + if created_post_types: + created_post_types = await db.create_post_types_bulk(created_post_types) + logger.info(f"Created {len(created_post_types)} post types") + + # Step 3: Create LinkedIn profile from customer data (NO scraping) + step_num = 3 if post_types_data else 2 + logger.info(f"Step {step_num}/{total_steps}: Creating LinkedIn profile from provided data") + linkedin_profile = LinkedInProfile( + customer_id=customer.id, + profile_data={ + "persona": customer_data.get("persona"), + "form_of_address": customer_data.get("form_of_address"), + "style_guide": customer_data.get("style_guide"), + "linkedin_url": linkedin_url + }, + name=customer_name, + headline=customer_data.get("persona", "")[:100] if customer_data.get("persona") else None + ) + await db.save_linkedin_profile(linkedin_profile) + logger.info("LinkedIn profile saved") + + # Step 4: Scrape ONLY posts using Apify + step_num = 4 if post_types_data else 3 + logger.info(f"Step {step_num}/{total_steps}: Scraping LinkedIn posts") + try: + raw_posts = await scraper.scrape_posts(linkedin_url, limit=50) + parsed_posts = scraper.parse_posts_data(raw_posts) + + linkedin_posts = [] + for post_data in parsed_posts: + post = LinkedInPost( + customer_id=customer.id, + **post_data + ) + linkedin_posts.append(post) + + if linkedin_posts: + await db.save_linkedin_posts(linkedin_posts) + logger.info(f"Saved {len(linkedin_posts)} posts") + else: + logger.warning("No posts scraped") + linkedin_posts = [] + except Exception as e: + logger.error(f"Failed to scrape posts: {e}") + linkedin_posts = [] + + # Step 5: Analyze profile (with manual data + scraped posts) + step_num = 5 if post_types_data else 4 + logger.info(f"Step {step_num}/{total_steps}: Analyzing profile with AI") + try: + profile_analysis = await self.profile_analyzer.process( + profile=linkedin_profile, + posts=linkedin_posts, + customer_data=customer_data + ) + + # Save profile analysis + from src.database.models import ProfileAnalysis + analysis_record = ProfileAnalysis( + customer_id=customer.id, + writing_style=profile_analysis.get("writing_style", {}), + tone_analysis=profile_analysis.get("tone_analysis", 
{}), + topic_patterns=profile_analysis.get("topic_patterns", {}), + audience_insights=profile_analysis.get("audience_insights", {}), + full_analysis=profile_analysis + ) + await db.save_profile_analysis(analysis_record) + logger.info("Profile analysis saved") + except Exception as e: + logger.error(f"Profile analysis failed: {e}", exc_info=True) + raise + + # Step 6: Extract topics from posts + step_num = 6 if post_types_data else 5 + logger.info(f"Step {step_num}/{total_steps}: Extracting topics from posts") + if linkedin_posts: + try: + topics = await self.topic_extractor.process( + posts=linkedin_posts, + customer_id=customer.id # Pass UUID directly + ) + if topics: + await db.save_topics(topics) + logger.info(f"Extracted and saved {len(topics)} topics") + except Exception as e: + logger.error(f"Topic extraction failed: {e}", exc_info=True) + else: + logger.info("No posts to extract topics from") + + # Step 7 & 8: Classify and analyze post types (if post types exist) + if created_post_types and linkedin_posts: + # Step 7: Classify posts + logger.info(f"Step {total_steps - 1}/{total_steps}: Classifying posts by type") + try: + await self.classify_posts(customer.id) + except Exception as e: + logger.error(f"Post classification failed: {e}", exc_info=True) + + # Step 8: Analyze post types + logger.info(f"Step {total_steps}/{total_steps}: Analyzing post types") + try: + await self.analyze_post_types(customer.id) + except Exception as e: + logger.error(f"Post type analysis failed: {e}", exc_info=True) + + logger.info(f"Step {total_steps}/{total_steps}: Initial setup complete!") + return customer + + async def classify_posts(self, customer_id: UUID) -> int: + """ + Classify unclassified posts for a customer. + + Args: + customer_id: Customer UUID + + Returns: + Number of posts classified + """ + logger.info(f"=== CLASSIFYING POSTS for customer {customer_id} ===") + + # Get post types + post_types = await db.get_post_types(customer_id) + if not post_types: + logger.info("No post types defined, skipping classification") + return 0 + + # Get unclassified posts + posts = await db.get_unclassified_posts(customer_id) + if not posts: + logger.info("No unclassified posts found") + return 0 + + logger.info(f"Classifying {len(posts)} posts into {len(post_types)} types") + + # Run classification + classifications = await self.post_classifier.process(posts, post_types) + + if classifications: + # Bulk update classifications + await db.update_posts_classification_bulk(classifications) + logger.info(f"Classified {len(classifications)} posts") + return len(classifications) + + return 0 + + async def analyze_post_types(self, customer_id: UUID) -> Dict[str, Any]: + """ + Analyze all post types for a customer. 
+ + Args: + customer_id: Customer UUID + + Returns: + Dictionary with analysis results per post type + """ + logger.info(f"=== ANALYZING POST TYPES for customer {customer_id} ===") + + # Get post types + post_types = await db.get_post_types(customer_id) + if not post_types: + logger.info("No post types defined") + return {} + + results = {} + for post_type in post_types: + # Get posts for this type + posts = await db.get_posts_by_type(customer_id, post_type.id) + + if len(posts) < self.post_type_analyzer.MIN_POSTS_FOR_ANALYSIS: + logger.info(f"Post type '{post_type.name}' has only {len(posts)} posts, skipping analysis") + results[str(post_type.id)] = { + "skipped": True, + "reason": f"Not enough posts ({len(posts)} < {self.post_type_analyzer.MIN_POSTS_FOR_ANALYSIS})" + } + continue + + # Run analysis + logger.info(f"Analyzing post type '{post_type.name}' with {len(posts)} posts") + analysis = await self.post_type_analyzer.process(post_type, posts) + + # Save analysis to database + if analysis.get("sufficient_data"): + await db.update_post_type_analysis( + post_type_id=post_type.id, + analysis=analysis, + analyzed_post_count=len(posts) + ) + + results[str(post_type.id)] = analysis + + return results + + async def research_new_topics( + self, + customer_id: UUID, + progress_callback: Optional[Callable[[str, int, int], None]] = None, + post_type_id: Optional[UUID] = None + ) -> List[Dict[str, Any]]: + """ + Research new content topics for a customer. + + Args: + customer_id: Customer UUID + progress_callback: Optional callback(message, current_step, total_steps) + post_type_id: Optional post type to target research for + + Returns: + List of suggested topics + """ + logger.info(f"=== RESEARCHING NEW TOPICS for customer {customer_id} ===") + + # Get post type context if specified + post_type = None + post_type_analysis = None + if post_type_id: + post_type = await db.get_post_type(post_type_id) + if post_type: + post_type_analysis = post_type.analysis + logger.info(f"Targeting research for post type: {post_type.name}") + + def report_progress(message: str, step: int, total: int = 4): + if progress_callback: + progress_callback(message, step, total) + + # Step 1: Get profile analysis + report_progress("Lade Profil-Analyse...", 1) + profile_analysis = await db.get_profile_analysis(customer_id) + if not profile_analysis: + raise ValueError("Profile analysis not found. 
Run initial setup first.") + + # Step 2: Get ALL existing topics (from multiple sources to avoid repetition) + report_progress("Lade existierende Topics...", 2) + existing_topics = set() + + # From topics table + existing_topics_records = await db.get_topics(customer_id) + for t in existing_topics_records: + existing_topics.add(t.title) + + # From previous research results + all_research = await db.get_all_research(customer_id) + for research in all_research: + if research.suggested_topics: + for topic in research.suggested_topics: + if topic.get("title"): + existing_topics.add(topic["title"]) + + # From generated posts + generated_posts = await db.get_generated_posts(customer_id) + for post in generated_posts: + if post.topic_title: + existing_topics.add(post.topic_title) + + existing_topics = list(existing_topics) + logger.info(f"Found {len(existing_topics)} existing topics to avoid") + + # Get customer data + customer = await db.get_customer(customer_id) + + # Get example posts to understand the person's actual content style + # If post_type_id is specified, only use posts of that type + if post_type_id: + linkedin_posts = await db.get_posts_by_type(customer_id, post_type_id) + else: + linkedin_posts = await db.get_linkedin_posts(customer_id) + + example_post_texts = [ + post.post_text for post in linkedin_posts + if post.post_text and len(post.post_text) > 100 # Only substantial posts + ][:15] # Limit to 15 best examples + logger.info(f"Loaded {len(example_post_texts)} example posts for research context") + + # Step 3: Run research + report_progress("AI recherchiert neue Topics...", 3) + logger.info("Running research with AI") + research_results = await self.researcher.process( + profile_analysis=profile_analysis.full_analysis, + existing_topics=existing_topics, + customer_data=customer.metadata, + example_posts=example_post_texts, + post_type=post_type, + post_type_analysis=post_type_analysis + ) + + # Step 4: Save research results + report_progress("Speichere Ergebnisse...", 4) + from src.database.models import ResearchResult + research_record = ResearchResult( + customer_id=customer_id, + query=f"New topics for {customer.name}" + (f" ({post_type.name})" if post_type else ""), + results={"raw_response": research_results["raw_response"]}, + suggested_topics=research_results["suggested_topics"], + target_post_type_id=post_type_id + ) + await db.save_research_result(research_record) + logger.info(f"Research completed with {len(research_results['suggested_topics'])} suggestions") + + return research_results["suggested_topics"] + + async def create_post( + self, + customer_id: UUID, + topic: Dict[str, Any], + max_iterations: int = 3, + progress_callback: Optional[Callable[[str, int, int, Optional[int], Optional[List], Optional[List]], None]] = None, + post_type_id: Optional[UUID] = None + ) -> Dict[str, Any]: + """ + Create a LinkedIn post through writer-critic iteration. 
+ + Args: + customer_id: Customer UUID + topic: Topic dictionary + max_iterations: Maximum number of writer-critic iterations + progress_callback: Optional callback(message, iteration, max_iterations, score, versions, feedback_list) + post_type_id: Optional post type for type-specific writing + + Returns: + Dictionary with final post and metadata + """ + logger.info(f"=== CREATING POST for topic: {topic.get('title')} ===") + + def report_progress(message: str, iteration: int, score: Optional[int] = None, + versions: Optional[List] = None, feedback_list: Optional[List] = None): + if progress_callback: + progress_callback(message, iteration, max_iterations, score, versions, feedback_list) + + # Get profile analysis + report_progress("Lade Profil-Analyse...", 0, None, [], []) + profile_analysis = await db.get_profile_analysis(customer_id) + if not profile_analysis: + raise ValueError("Profile analysis not found. Run initial setup first.") + + # Get post type info if specified + post_type = None + post_type_analysis = None + if post_type_id: + post_type = await db.get_post_type(post_type_id) + if post_type and post_type.analysis: + post_type_analysis = post_type.analysis + logger.info(f"Using post type '{post_type.name}' for writing") + + # Load customer's real posts as style examples + # If post_type_id is specified, only use posts of that type + if post_type_id: + linkedin_posts = await db.get_posts_by_type(customer_id, post_type_id) + if len(linkedin_posts) < 3: + # Fall back to all posts if not enough type-specific posts + linkedin_posts = await db.get_linkedin_posts(customer_id) + logger.info("Not enough type-specific posts, using all posts") + else: + linkedin_posts = await db.get_linkedin_posts(customer_id) + + example_post_texts = [ + post.post_text for post in linkedin_posts + if post.post_text and len(post.post_text) > 100 # Only use substantial posts + ] + logger.info(f"Loaded {len(example_post_texts)} example posts for style reference") + + # Extract lessons from past feedback (if enabled) + feedback_lessons = await self._extract_recurring_feedback(customer_id) + + # Initialize tracking + writer_versions = [] + critic_feedback_list = [] + current_post = None + approved = False + iteration = 0 + + # Writer-Critic loop + while iteration < max_iterations and not approved: + iteration += 1 + logger.info(f"--- Iteration {iteration}/{max_iterations} ---") + + # Writer creates/revises post + if iteration == 1: + # Initial post + report_progress("Writer erstellt ersten Entwurf...", iteration, None, writer_versions, critic_feedback_list) + current_post = await self.writer.process( + topic=topic, + profile_analysis=profile_analysis.full_analysis, + example_posts=example_post_texts, + learned_lessons=feedback_lessons, # Pass lessons from past feedback + post_type=post_type, + post_type_analysis=post_type_analysis + ) + else: + # Revision based on feedback - pass full critic result for structured changes + report_progress("Writer überarbeitet Post...", iteration, None, writer_versions, critic_feedback_list) + last_feedback = critic_feedback_list[-1] + current_post = await self.writer.process( + topic=topic, + profile_analysis=profile_analysis.full_analysis, + feedback=last_feedback.get("feedback", ""), + previous_version=writer_versions[-1], + example_posts=example_post_texts, + critic_result=last_feedback, # Pass full critic result with specific_changes + learned_lessons=feedback_lessons, # Also for revisions + post_type=post_type, + post_type_analysis=post_type_analysis + ) + + 
writer_versions.append(current_post) + logger.info(f"Writer produced version {iteration}") + + # Report progress with new version + report_progress("Critic bewertet Post...", iteration, None, writer_versions, critic_feedback_list) + + # Critic reviews post with iteration awareness + critic_result = await self.critic.process( + post=current_post, + profile_analysis=profile_analysis.full_analysis, + topic=topic, + example_posts=example_post_texts, + iteration=iteration, + max_iterations=max_iterations + ) + critic_feedback_list.append(critic_result) + + approved = critic_result.get("approved", False) + score = critic_result.get("overall_score", 0) + + # Auto-approve on last iteration if score is decent (>= 80) + if iteration == max_iterations and not approved and score >= 80: + approved = True + critic_result["approved"] = True + logger.info(f"Auto-approved on final iteration with score {score}") + + logger.info(f"Critic score: {score}/100 | Approved: {approved}") + + if approved: + report_progress("Post genehmigt!", iteration, score, writer_versions, critic_feedback_list) + logger.info("Post approved!") + break + else: + report_progress(f"Score: {score}/100 - Überarbeitung nötig", iteration, score, writer_versions, critic_feedback_list) + + if iteration < max_iterations: + logger.info("Post needs revision, continuing...") + + # Determine final status based on score + final_score = critic_feedback_list[-1].get("overall_score", 0) if critic_feedback_list else 0 + if approved and final_score >= 85: + status = "approved" + elif approved and final_score >= 80: + status = "approved" # Auto-approved + else: + status = "draft" + + # Save generated post + from src.database.models import GeneratedPost + generated_post = GeneratedPost( + customer_id=customer_id, + topic_title=topic.get("title", "Unknown"), + post_content=current_post, + iterations=iteration, + writer_versions=writer_versions, + critic_feedback=critic_feedback_list, + status=status, + post_type_id=post_type_id + ) + saved_post = await db.save_generated_post(generated_post) + + logger.info(f"Post creation complete after {iteration} iterations") + + return { + "post_id": saved_post.id, + "final_post": current_post, + "iterations": iteration, + "approved": approved, + "final_score": critic_feedback_list[-1].get("overall_score", 0) if critic_feedback_list else 0, + "writer_versions": writer_versions, + "critic_feedback": critic_feedback_list + } + + async def _extract_recurring_feedback(self, customer_id: UUID) -> Dict[str, Any]: + """ + Extract recurring feedback patterns from past generated posts. 
+ + Args: + customer_id: Customer UUID + + Returns: + Dictionary with recurring improvements and lessons learned + """ + if not settings.writer_learn_from_feedback: + return {"lessons": [], "patterns": {}} + + # Get recent generated posts with their critic feedback + generated_posts = await db.get_generated_posts(customer_id) + + if not generated_posts: + return {"lessons": [], "patterns": {}} + + # Limit to recent posts + recent_posts = generated_posts[:settings.writer_feedback_history_count] + + # Collect all improvements from final feedback + all_improvements = [] + all_scores = [] + low_score_issues = [] # Issues from posts that scored < 85 + + for post in recent_posts: + if not post.critic_feedback: + continue + + # Get final feedback (last in list) + final_feedback = post.critic_feedback[-1] if post.critic_feedback else None + if not final_feedback: + continue + + score = final_feedback.get("overall_score", 0) + all_scores.append(score) + + # Collect improvements + improvements = final_feedback.get("improvements", []) + all_improvements.extend(improvements) + + # Track issues from lower-scoring posts + if score < 85: + low_score_issues.extend(improvements) + + if not all_improvements: + return {"lessons": [], "patterns": {}} + + # Count frequency of improvements (normalized) + def normalize_improvement(text: str) -> str: + """Normalize improvement text for comparison.""" + text = text.lower().strip() + # Remove common prefixes + for prefix in ["der ", "die ", "das ", "mehr ", "weniger ", "zu "]: + if text.startswith(prefix): + text = text[len(prefix):] + return text[:50] # Limit length for comparison + + improvement_counts = Counter([normalize_improvement(imp) for imp in all_improvements]) + low_score_counts = Counter([normalize_improvement(imp) for imp in low_score_issues]) + + # Find recurring issues (mentioned 2+ times) + recurring_issues = [ + imp for imp, count in improvement_counts.most_common(10) + if count >= 2 + ] + + # Find critical issues (from low-scoring posts, mentioned 2+ times) + critical_issues = [ + imp for imp, count in low_score_counts.most_common(5) + if count >= 2 + ] + + # Build lessons learned + lessons = [] + + if critical_issues: + lessons.append({ + "type": "critical", + "message": "Diese Punkte führten zu niedrigen Scores - UNBEDINGT vermeiden:", + "items": critical_issues[:3] + }) + + if recurring_issues: + # Filter out critical issues + non_critical = [r for r in recurring_issues if r not in critical_issues] + if non_critical: + lessons.append({ + "type": "recurring", + "message": "Häufig genannte Verbesserungspunkte aus vergangenen Posts:", + "items": non_critical[:4] + }) + + # Calculate average score for context + avg_score = sum(all_scores) / len(all_scores) if all_scores else 0 + + logger.info(f"Extracted feedback from {len(recent_posts)} posts: {len(lessons)} lesson categories, avg score: {avg_score:.1f}") + + return { + "lessons": lessons, + "patterns": { + "avg_score": avg_score, + "posts_analyzed": len(recent_posts), + "recurring_count": len(recurring_issues), + "critical_count": len(critical_issues) + } + } + + async def get_customer_status(self, customer_id: UUID) -> Dict[str, Any]: + """ + Get status information for a customer. 
+ + Args: + customer_id: Customer UUID + + Returns: + Status dictionary + """ + customer = await db.get_customer(customer_id) + if not customer: + raise ValueError("Customer not found") + + profile = await db.get_linkedin_profile(customer_id) + posts = await db.get_linkedin_posts(customer_id) + analysis = await db.get_profile_analysis(customer_id) + generated_posts = await db.get_generated_posts(customer_id) + all_research = await db.get_all_research(customer_id) + post_types = await db.get_post_types(customer_id) + + # Count total research entries + research_count = len(all_research) + + # Count classified posts + classified_posts = [p for p in posts if p.post_type_id] + + # Count analyzed post types + analyzed_types = [pt for pt in post_types if pt.analysis] + + # Check what's missing + missing_items = [] + if not posts: + missing_items.append("LinkedIn Posts (Scraping)") + if not analysis: + missing_items.append("Profil-Analyse") + if research_count == 0: + missing_items.append("Research Topics") + + # Ready for posts if we have scraped posts and profile analysis + ready_for_posts = len(posts) > 0 and analysis is not None + + return { + "has_scraped_posts": len(posts) > 0, + "scraped_posts_count": len(posts), + "has_profile_analysis": analysis is not None, + "research_count": research_count, + "posts_count": len(generated_posts), + "ready_for_posts": ready_for_posts, + "missing_items": missing_items, + "post_types_count": len(post_types), + "classified_posts_count": len(classified_posts), + "analyzed_types_count": len(analyzed_types) + } + + +# Global orchestrator instance +orchestrator = WorkflowOrchestrator() diff --git a/src/scraper/__init__.py b/src/scraper/__init__.py new file mode 100644 index 0000000..00a33a1 --- /dev/null +++ b/src/scraper/__init__.py @@ -0,0 +1,4 @@ +"""Scraper module.""" +from src.scraper.apify_scraper import LinkedInScraper, scraper + +__all__ = ["LinkedInScraper", "scraper"] diff --git a/src/scraper/apify_scraper.py b/src/scraper/apify_scraper.py new file mode 100644 index 0000000..f13d300 --- /dev/null +++ b/src/scraper/apify_scraper.py @@ -0,0 +1,168 @@ +"""LinkedIn posts scraper using Apify (apimaestro~linkedin-profile-posts).""" +import asyncio +from typing import Dict, Any, List +from apify_client import ApifyClient +from loguru import logger + +from src.config import settings + + +class LinkedInScraper: + """LinkedIn posts scraper using Apify.""" + + def __init__(self): + """Initialize Apify client.""" + self.client = ApifyClient(settings.apify_api_key) + logger.info("Apify client initialized") + + async def scrape_posts(self, linkedin_url: str, limit: int = 50) -> List[Dict[str, Any]]: + """ + Scrape posts from a LinkedIn profile. 
+ + Args: + linkedin_url: URL of the LinkedIn profile + limit: Maximum number of posts to scrape + + Returns: + List of post dictionaries + """ + logger.info(f"Scraping posts from: {linkedin_url}") + + # Extract username from LinkedIn URL + # Example: https://www.linkedin.com/in/christinahildebrandt/ -> christinahildebrandt + username = self._extract_username_from_url(linkedin_url) + logger.info(f"Extracted username: {username}") + + # Prepare the Actor input for apimaestro~linkedin-profile-posts + run_input = { + "username": username, + "page_number": 1, + "limit": limit, + } + + try: + # Run the Actor in thread pool to avoid blocking event loop + run = await asyncio.to_thread( + self.client.actor(settings.apify_actor_id).call, + run_input=run_input + ) + + # Fetch results from the run's dataset in thread pool + dataset_items = await asyncio.to_thread( + lambda: list(self.client.dataset(run["defaultDatasetId"]).iterate_items()) + ) + + + if not dataset_items: + logger.warning("No posts found") + return [] + + logger.info(f"Successfully scraped {len(dataset_items)} posts") + return dataset_items + + except Exception as e: + logger.error(f"Error scraping posts: {e}") + raise + + def _extract_username_from_url(self, linkedin_url: str) -> str: + """ + Extract username from LinkedIn URL. + + Args: + linkedin_url: LinkedIn profile URL + + Returns: + Username + """ + import re + + # Remove trailing slash + url = linkedin_url.rstrip('/') + + # Extract username from different LinkedIn URL formats + # https://www.linkedin.com/in/username/ + # https://linkedin.com/in/username + # www.linkedin.com/in/username + match = re.search(r'/in/([^/]+)', url) + if match: + return match.group(1) + + # If no match, raise error + raise ValueError(f"Could not extract username from LinkedIn URL: {linkedin_url}") + + def parse_posts_data(self, raw_posts: List[Dict[str, Any]]) -> List[Dict[str, Any]]: + """ + Parse and structure the raw Apify posts data. + + Only includes posts with post_type "regular" (excludes reposts, shared posts, etc.) + + Args: + raw_posts: List of raw post data from Apify + + Returns: + List of structured post dictionaries + """ + from datetime import datetime + parsed_posts = [] + skipped_count = 0 + + for post in raw_posts: + # Only include regular posts (not reposts, shares, etc.) 
+ post_type = post.get("post_type", "").lower() + if post_type != "regular": + skipped_count += 1 + logger.debug(f"Skipping non-regular post (type: {post_type})") + continue + # Extract posted_at date + posted_at_data = post.get("posted_at", {}) + post_date = None + + if isinstance(posted_at_data, dict): + date_str = posted_at_data.get("date") + if date_str: + try: + # Try to parse the date string + # Format: "2026-01-20 07:45:33" + post_date = datetime.strptime(date_str, "%Y-%m-%d %H:%M:%S") + except (ValueError, TypeError): + # If parsing fails, keep as string + post_date = date_str + + # Extract stats + stats = post.get("stats", {}) + + # Create a clean copy of raw_data without datetime objects + raw_data_clean = {} + for key, value in post.items(): + if isinstance(value, datetime): + raw_data_clean[key] = value.isoformat() + elif isinstance(value, dict): + # Handle nested dicts + raw_data_clean[key] = {} + for k, v in value.items(): + if isinstance(v, datetime): + raw_data_clean[key][k] = v.isoformat() + else: + raw_data_clean[key][k] = v + else: + raw_data_clean[key] = value + + parsed_post = { + "post_url": post.get("url"), + "post_text": post.get("text", ""), + "post_date": post_date, + "likes": stats.get("like", 0) if stats else 0, + "comments": stats.get("comments", 0) if stats else 0, + "shares": stats.get("reposts", 0) if stats else 0, + "raw_data": raw_data_clean + } + parsed_posts.append(parsed_post) + + if skipped_count > 0: + logger.info(f"Skipped {skipped_count} non-regular posts (reposts, shares, etc.)") + + return parsed_posts + + +# Global scraper instance +scraper = LinkedInScraper() diff --git a/src/tui/__init__.py b/src/tui/__init__.py new file mode 100644 index 0000000..c8ef323 --- /dev/null +++ b/src/tui/__init__.py @@ -0,0 +1,4 @@ +"""TUI module.""" +from src.tui.app import LinkedInWorkflowApp, run_app + +__all__ = ["LinkedInWorkflowApp", "run_app"] diff --git a/src/tui/app.py b/src/tui/app.py new file mode 100644 index 0000000..33c04c6 --- /dev/null +++ b/src/tui/app.py @@ -0,0 +1,912 @@ +"""Main TUI application using Textual.""" +import threading +from textual.app import App, ComposeResult +from textual.containers import Container, Horizontal, Vertical, ScrollableContainer +from textual.widgets import Header, Footer, Button, Static, Input, Label, TextArea, OptionList, LoadingIndicator, ProgressBar +from textual.widgets.option_list import Option +from textual.binding import Binding +from textual.screen import Screen +from textual.worker import Worker, WorkerState +from loguru import logger + +from src.orchestrator import orchestrator +from src.database import db + + +class WelcomeScreen(Screen): + """Welcome screen with main menu.""" + + BINDINGS = [ + Binding("q", "quit", "Quit"), + ] + + def compose(self) -> ComposeResult: + """Create child widgets.""" + yield Header() + yield Container( + Static( + """ +[bold cyan]Multi-Agent AI Workflow[/] + + +[yellow]Choose an option:[/] +""", + id="welcome_text", + ), + Button("🚀 New Customer Setup", id="btn_new_customer", variant="primary"), + Button("🔍 Research Topics", id="btn_research", variant="success"), + Button("✍️ Create Post", id="btn_create_post", variant="success"), + Button("📊 View Status", id="btn_status", variant="default"), + Button("❌ Exit", id="btn_exit", variant="error"), + id="menu_container", + ) + yield Footer() + + def on_button_pressed(self, event: Button.Pressed) -> None: + """Handle button presses.""" + button_id = event.button.id + + if button_id == "btn_new_customer": + 
self.app.push_screen(NewCustomerScreen()) + elif button_id == "btn_research": + self.app.push_screen(ResearchScreen()) + elif button_id == "btn_create_post": + self.app.push_screen(CreatePostScreen()) + elif button_id == "btn_status": + self.app.push_screen(StatusScreen()) + elif button_id == "btn_exit": + self.app.exit() + + +class NewCustomerScreen(Screen): + """Screen for setting up a new customer.""" + + BINDINGS = [ + Binding("escape", "app.pop_screen", "Back"), + ] + + def compose(self) -> ComposeResult: + """Create child widgets.""" + yield Header() + yield ScrollableContainer( + Static("[bold cyan]═══ New Customer Setup ═══[/]\n", id="title"), + + # Basic Info Section + Static("[bold yellow]Basic Information[/]"), + Label("Customer Name *:"), + Input(placeholder="Enter customer name", id="input_name"), + + Label("LinkedIn URL *:"), + Input(placeholder="https://www.linkedin.com/in/username", id="input_linkedin"), + + Label("Company Name:"), + Input(placeholder="Enter company name", id="input_company"), + + Label("Email:"), + Input(placeholder="customer@example.com", id="input_email"), + + # Persona Section + Static("\n[bold yellow]Persona[/]"), + Label("Describe the customer's persona, expertise, and positioning:"), + TextArea(id="input_persona"), + + # Form of Address + Static("\n[bold yellow]Communication Style[/]"), + Label("Form of Address:"), + Input(placeholder="e.g., Duzen (Du/Euch) or Siezen (Sie)", id="input_address"), + + # Style Guide + Label("Style Guide:"), + Label("Describe writing style, tone, and guidelines:"), + TextArea(id="input_style_guide"), + + # Topic History + Static("\n[bold yellow]Content History[/]"), + Label("Topic History (comma separated):"), + Label("Enter previous topics covered:"), + TextArea(id="input_topic_history"), + + # Example Posts + Label("Example Posts (separate with --- on new line):"), + Label("Paste example posts to analyze writing style:"), + TextArea(id="input_example_posts"), + + # Actions + Static("\n"), + Horizontal( + Button("Cancel", id="btn_cancel", variant="error"), + Button("Start Setup", id="btn_start", variant="primary"), + id="button_row" + ), + + # Status/Progress area + Container( + Static("", id="status_message"), + id="status_container" + ), + + id="form_container", + ) + yield Footer() + + def on_button_pressed(self, event: Button.Pressed) -> None: + """Handle button presses.""" + if event.button.id == "btn_cancel": + self.app.pop_screen() + elif event.button.id == "btn_start": + self.start_setup() + + def start_setup(self) -> None: + """Start the customer setup process.""" + # Get inputs + name = self.query_one("#input_name", Input).value.strip() + linkedin_url = self.query_one("#input_linkedin", Input).value.strip() + company = self.query_one("#input_company", Input).value.strip() + email = self.query_one("#input_email", Input).value.strip() + persona = self.query_one("#input_persona", TextArea).text.strip() + form_of_address = self.query_one("#input_address", Input).value.strip() + style_guide = self.query_one("#input_style_guide", TextArea).text.strip() + topic_history_raw = self.query_one("#input_topic_history", TextArea).text.strip() + example_posts_raw = self.query_one("#input_example_posts", TextArea).text.strip() + + status_widget = self.query_one("#status_message", Static) + + if not name or not linkedin_url: + status_widget.update("[red]✗ Please fill in required fields (Name and LinkedIn URL)[/]") + return + + # Parse topic history + topic_history = [t.strip() for t in topic_history_raw.split(",") if 
t.strip()] + + # Parse example posts + example_posts = [p.strip() for p in example_posts_raw.split("---") if p.strip()] + + # Disable buttons during setup + self.query_one("#btn_start", Button).disabled = True + self.query_one("#btn_cancel", Button).disabled = True + + # Show progress steps + status_widget.update("[bold cyan]Starting setup process...[/]\n") + + customer_data = { + "company_name": company, + "email": email, + "persona": persona, + "form_of_address": form_of_address, + "style_guide": style_guide, + "topic_history": topic_history, + "example_posts": example_posts + } + + # Show what's happening + status_widget.update( + "[bold cyan]⏳ Step 1/5: Creating customer record...[/]\n" + "[bold cyan]⏳ Step 2/5: Creating LinkedIn profile...[/]\n" + "[bold cyan]⏳ Step 3/5: Scraping LinkedIn posts...[/]\n" + "[yellow] This may take 1-2 minutes...[/]" + ) + + # Run setup in background worker + self.run_worker( + self._run_setup_worker(linkedin_url, name, customer_data), + name="setup_worker", + group="setup", + exclusive=True + ) + + async def _run_setup_worker(self, linkedin_url: str, name: str, customer_data: dict): + """Worker method to run setup in background.""" + return await orchestrator.run_initial_setup( + linkedin_url=linkedin_url, + customer_name=name, + customer_data=customer_data + ) + + def on_worker_state_changed(self, event: Worker.StateChanged) -> None: + """Handle worker state changes.""" + if event.worker.name != "setup_worker": + return + + status_widget = self.query_one("#status_message", Static) + + if event.state == WorkerState.SUCCESS: + # Worker completed successfully + customer = event.worker.result + status_widget.update( + "[bold green]✓ Step 1/5: Customer record created[/]\n" + "[bold green]✓ Step 2/5: LinkedIn profile created[/]\n" + "[bold green]✓ Step 3/5: LinkedIn posts scraped[/]\n" + "[bold green]✓ Step 4/5: Profile analyzed[/]\n" + "[bold green]✓ Step 5/5: Topics extracted[/]\n\n" + f"[bold cyan]═══ Setup Complete! 
═══[/]\n" + f"[green]Customer ID: {customer.id}[/]\n" + f"[green]Name: {customer.name}[/]\n\n" + "[yellow]You can now research topics or create posts.[/]" + ) + logger.info(f"Setup completed for customer: {customer.id}") + elif event.state == WorkerState.ERROR: + # Worker failed + error = event.worker.error + logger.exception(f"Setup failed: {error}") + status_widget.update( + f"[bold red]✗ Setup Failed[/]\n\n" + f"[red]Error: {str(error)}[/]\n\n" + f"[yellow]Please check the error and try again.[/]" + ) + self.query_one("#btn_start", Button).disabled = False + self.query_one("#btn_cancel", Button).disabled = False + elif event.state == WorkerState.CANCELLED: + # Worker was cancelled + status_widget.update("[yellow]Setup cancelled[/]") + self.query_one("#btn_start", Button).disabled = False + self.query_one("#btn_cancel", Button).disabled = False + + +class ResearchScreen(Screen): + """Screen for researching new topics.""" + + BINDINGS = [ + Binding("escape", "app.pop_screen", "Back"), + ] + + def compose(self) -> ComposeResult: + """Create child widgets.""" + yield Header() + yield Container( + Static("[bold cyan]═══ Research New Topics ═══[/]\n"), + + Static("[bold yellow]Select Customer[/]"), + Static("Use arrow keys to navigate, Enter to select", id="help_text"), + OptionList(id="customer_list"), + + Static("\n"), + Button("Start Research", id="btn_research", variant="primary"), + + Static("\n"), + Container( + Static("", id="progress_status"), + ProgressBar(id="progress_bar", total=100, show_eta=False), + id="progress_container" + ), + + ScrollableContainer( + Static("", id="research_results"), + id="results_container" + ), + + id="research_container", + ) + yield Footer() + + async def on_mount(self) -> None: + """Load customers when screen mounts.""" + # Hide progress container initially + self.query_one("#progress_container").display = False + await self.load_customers() + + async def load_customers(self) -> None: + """Load customer list.""" + try: + customers = await db.list_customers() + customer_list = self.query_one("#customer_list", OptionList) + + if customers: + for c in customers: + customer_list.add_option( + Option(f"- {c.name} - {c.company_name or 'No Company'}", id=str(c.id)) + ) + self._customers = {str(c.id): c for c in customers} + else: + self.query_one("#help_text", Static).update( + "[yellow]No customers found. 
Please create a customer first.[/]" + ) + except Exception as e: + logger.error(f"Failed to load customers: {e}") + self.query_one("#help_text", Static).update(f"[red]Error loading customers: {str(e)}[/]") + + def on_option_list_option_selected(self, event: OptionList.OptionSelected) -> None: + """Handle customer selection.""" + self._selected_customer_id = event.option.id + + def on_button_pressed(self, event: Button.Pressed) -> None: + """Handle button presses.""" + if event.button.id == "btn_research": + if hasattr(self, "_selected_customer_id") and self._selected_customer_id: + self.start_research(self._selected_customer_id) + else: + results_widget = self.query_one("#research_results", Static) + results_widget.update("[yellow]Please select a customer first.[/]") + + def start_research(self, customer_id: str) -> None: + """Start research.""" + # Clear previous results + self.query_one("#research_results", Static).update("") + + # Show progress container + self.query_one("#progress_container").display = True + self.query_one("#progress_bar", ProgressBar).update(progress=0) + self.query_one("#progress_status", Static).update("[bold cyan]Starte Research...[/]") + + # Disable button + self.query_one("#btn_research", Button).disabled = True + + # Run research in background worker + self.run_worker( + self._run_research_worker(customer_id), + name="research_worker", + group="research", + exclusive=True + ) + + def _update_research_progress(self, message: str, step: int, total: int) -> None: + """Update progress - works from both main thread and worker threads.""" + def update(): + progress_pct = (step / total) * 100 + self.query_one("#progress_bar", ProgressBar).update(progress=progress_pct) + self.query_one("#progress_status", Static).update(f"[bold cyan]Step {step}/{total}:[/] {message}") + self.refresh() + + # Check if we're on the main thread or a different thread + if self.app._thread_id == threading.get_ident(): + # Same thread - schedule update for next tick to allow UI refresh + self.app.call_later(update) + else: + # Different thread - use call_from_thread + self.app.call_from_thread(update) + + async def _run_research_worker(self, customer_id: str): + """Worker method to run research in background.""" + from uuid import UUID + return await orchestrator.research_new_topics( + UUID(customer_id), + progress_callback=self._update_research_progress + ) + + def on_worker_state_changed(self, event: Worker.StateChanged) -> None: + """Handle worker state changes.""" + if event.worker.name != "research_worker": + return + + results_widget = self.query_one("#research_results", Static) + + if event.state == WorkerState.SUCCESS: + # Worker completed successfully + topics = event.worker.result + + # Update progress to 100% + self.query_one("#progress_bar", ProgressBar).update(progress=100) + self.query_one("#progress_status", Static).update("[bold green]✓ Abgeschlossen![/]") + + # Format results + output = "[bold green]✓ Research Complete![/]\n\n" + output += f"[bold cyan]Found {len(topics)} new topic suggestions:[/]\n\n" + + for i, topic in enumerate(topics, 1): + output += f"[bold]{i}. {topic.get('title', 'Unknown')}[/]\n" + output += f" [dim]Category:[/] {topic.get('category', 'N/A')}\n" + + fact = topic.get('fact', '') + if fact: + if len(fact) > 200: + fact = fact[:197] + "..." 
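+                    # Editor's note: suggested topics are assumed to be dicts carrying at least
+                    # "title", "category" and "fact" keys (see the .get() fallbacks used here);
+                    # facts longer than ~200 characters are clipped purely for this TUI summary.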
+ output += f" [dim]Description:[/] {fact}\n" + + output += "\n" + + output += "[yellow]Topics saved to research results and ready for post creation.[/]" + results_widget.update(output) + elif event.state == WorkerState.ERROR: + # Worker failed + error = event.worker.error + logger.exception(f"Research failed: {error}") + self.query_one("#progress_status", Static).update("[bold red]✗ Fehler![/]") + results_widget.update( + f"[bold red]✗ Research Failed[/]\n\n" + f"[red]Error: {str(error)}[/]\n\n" + f"[yellow]Please check the error and try again.[/]" + ) + elif event.state == WorkerState.CANCELLED: + # Worker was cancelled + results_widget.update("[yellow]Research cancelled[/]") + + # Hide progress container after a moment (keep visible briefly to show completion) + # self.query_one("#progress_container").display = False + + # Re-enable button + self.query_one("#btn_research", Button).disabled = False + + +class CreatePostScreen(Screen): + """Screen for creating posts.""" + + BINDINGS = [ + Binding("escape", "app.pop_screen", "Back"), + ] + + def compose(self) -> ComposeResult: + """Create child widgets.""" + yield Header() + yield Container( + Static("[bold cyan]═══ Create LinkedIn Post ═══[/]\n"), + + # Customer Selection + Static("[bold yellow]1. Select Customer[/]"), + Static("Use arrow keys to navigate, Enter to select", id="help_customer"), + OptionList(id="customer_list"), + + # Topic Selection + Static("\n[bold yellow]2. Select Topic[/]"), + Static("Select a customer first to load topics...", id="help_topic"), + OptionList(id="topic_list"), + + Static("\n"), + Button("Create Post", id="btn_create", variant="primary"), + + Static("\n"), + Container( + Static("", id="progress_status"), + ProgressBar(id="progress_bar", total=100, show_eta=False), + Static("", id="iteration_info"), + id="progress_container" + ), + + ScrollableContainer( + Static("", id="post_output"), + id="output_container" + ), + id="create_container", + ) + yield Footer() + + async def on_mount(self) -> None: + """Load data when screen mounts.""" + # Hide progress container initially + self.query_one("#progress_container").display = False + await self.load_customers() + + async def load_customers(self) -> None: + """Load customer list.""" + try: + customers = await db.list_customers() + customer_list = self.query_one("#customer_list", OptionList) + + if customers: + for c in customers: + customer_list.add_option( + Option(f"- {c.name} - {c.company_name or 'No Company'}", id=str(c.id)) + ) + self._customers = {str(c.id): c for c in customers} + else: + self.query_one("#help_customer", Static).update( + "[yellow]No customers found.[/]" + ) + except Exception as e: + logger.exception(f"Failed to load customers: {e}") + self.query_one("#help_customer", Static).update( + f"[red]Error loading customers: {str(e)}[/]" + ) + + async def on_option_list_option_selected(self, event: OptionList.OptionSelected) -> None: + """Handle selection from option lists.""" + if event.option_list.id == "customer_list": + # Customer selected + self._selected_customer_id = event.option.id + customer_name = self._customers[event.option.id].name + self.query_one("#help_customer", Static).update( + f"[green]✓ Selected: {customer_name}[/]" + ) + # Load topics for this customer + await self.load_topics(event.option.id) + elif event.option_list.id == "topic_list": + # Topic selected + self._selected_topic_index = int(event.option.id) + topic = self._topics[self._selected_topic_index] + self.query_one("#help_topic", Static).update( + f"[green]✓ 
Selected: {topic.get('title', 'Unknown')}[/]" + ) + + async def load_topics(self, customer_id) -> None: + """Load ALL topics for customer from ALL research results.""" + try: + from uuid import UUID + # Get ALL research results, not just the latest + all_research = await db.get_all_research(UUID(customer_id)) + + topic_list = self.query_one("#topic_list", OptionList) + topic_list.clear_options() + + # Collect all topics from all research results + all_topics = [] + for research in all_research: + if research.suggested_topics: + all_topics.extend(research.suggested_topics) + + if all_topics: + self._topics = all_topics + + for i, t in enumerate(all_topics): + # Show title and category + display_text = f"- {t.get('title', 'Unknown')} [{t.get('category', 'N/A')}]" + topic_list.add_option(Option(display_text, id=str(i))) + + self.query_one("#help_topic", Static).update( + f"[cyan]{len(all_topics)} topics available from {len(all_research)} research(es) - select one to continue[/]" + ) + else: + self.query_one("#help_topic", Static).update( + "[yellow]No research topics found. Run research first.[/]" + ) + except Exception as e: + logger.exception(f"Failed to load topics: {e}") + self.query_one("#help_topic", Static).update( + f"[red]Error loading topics: {str(e)}[/]" + ) + + def on_button_pressed(self, event: Button.Pressed) -> None: + """Handle button presses.""" + if event.button.id == "btn_create": + if not hasattr(self, "_selected_customer_id") or not self._selected_customer_id: + output_widget = self.query_one("#post_output", Static) + output_widget.update("[yellow]Please select a customer first.[/]") + return + + if not hasattr(self, "_selected_topic_index") or self._selected_topic_index is None: + output_widget = self.query_one("#post_output", Static) + output_widget.update("[yellow]Please select a topic first.[/]") + return + + from uuid import UUID + topic = self._topics[self._selected_topic_index] + self.create_post(UUID(self._selected_customer_id), topic) + + def create_post(self, customer_id, topic) -> None: + """Create a post.""" + output_widget = self.query_one("#post_output", Static) + + # Clear previous output + output_widget.update("") + + # Show progress container + self.query_one("#progress_container").display = True + self.query_one("#progress_bar", ProgressBar).update(progress=0) + self.query_one("#progress_status", Static).update("[bold cyan]Starte Post-Erstellung...[/]") + self.query_one("#iteration_info", Static).update("") + + # Disable button + self.query_one("#btn_create", Button).disabled = True + + # Run post creation in background worker + self.run_worker( + self._run_create_post_worker(customer_id, topic), + name="create_post_worker", + group="create_post", + exclusive=True + ) + + def _update_post_progress(self, message: str, iteration: int, max_iterations: int, score: int = None) -> None: + """Update progress - works from both main thread and worker threads.""" + def update(): + # Calculate progress based on iteration + if iteration == 0: + progress_pct = 0 + else: + progress_pct = (iteration / max_iterations) * 100 + + self.query_one("#progress_bar", ProgressBar).update(progress=progress_pct) + self.query_one("#progress_status", Static).update(f"[bold cyan]{message}[/]") + + if iteration > 0: + score_text = f" | Score: {score}/100" if score else "" + self.query_one("#iteration_info", Static).update( + f"[dim]Iteration {iteration}/{max_iterations}{score_text}[/]" + ) + self.refresh() + + # Check if we're on the main thread or a different thread + if 
self.app._thread_id == threading.get_ident(): + # Same thread - schedule update for next tick to allow UI refresh + self.app.call_later(update) + else: + # Different thread - use call_from_thread + self.app.call_from_thread(update) + + async def _run_create_post_worker(self, customer_id, topic): + """Worker method to create post in background.""" + return await orchestrator.create_post( + customer_id=customer_id, + topic=topic, + max_iterations=3, + progress_callback=self._update_post_progress + ) + + def on_worker_state_changed(self, event: Worker.StateChanged) -> None: + """Handle worker state changes.""" + if event.worker.name != "create_post_worker": + return + + output_widget = self.query_one("#post_output", Static) + + if event.state == WorkerState.SUCCESS: + # Worker completed successfully + result = event.worker.result + topic = self._topics[self._selected_topic_index] + + # Update progress to 100% + self.query_one("#progress_bar", ProgressBar).update(progress=100) + self.query_one("#progress_status", Static).update("[bold green]✓ Post erstellt![/]") + self.query_one("#iteration_info", Static).update( + f"[green]Final: {result['iterations']} Iterationen | Score: {result['final_score']}/100[/]" + ) + + # Format output + output = f"[bold green]✓ Post Created Successfully![/]\n\n" + output += f"[bold cyan]═══ Post Details ═══[/]\n" + output += f"[bold]Topic:[/] {topic.get('title', 'Unknown')}\n" + output += f"[bold]Iterations:[/] {result['iterations']}\n" + output += f"[bold]Final Score:[/] {result['final_score']}/100\n" + output += f"[bold]Approved:[/] {'✓ Yes' if result['approved'] else '✗ No (reached max iterations)'}\n\n" + + output += f"[bold cyan]═══ Final Post ═══[/]\n\n" + output += f"[white]{result['final_post']}[/]\n\n" + + output += f"[bold cyan]═══════════════════[/]\n" + output += f"[yellow]Post saved to database with ID: {result['post_id']}[/]" + + output_widget.update(output) + elif event.state == WorkerState.ERROR: + # Worker failed + error = event.worker.error + logger.exception(f"Post creation failed: {error}") + self.query_one("#progress_status", Static).update("[bold red]✗ Fehler![/]") + output_widget.update( + f"[bold red]✗ Post Creation Failed[/]\n\n" + f"[red]Error: {str(error)}[/]\n\n" + f"[yellow]Please check the error and try again.[/]" + ) + elif event.state == WorkerState.CANCELLED: + # Worker was cancelled + output_widget.update("[yellow]Post creation cancelled[/]") + + # Re-enable button + self.query_one("#btn_create", Button).disabled = False + + +class StatusScreen(Screen): + """Screen for viewing customer status.""" + + BINDINGS = [ + Binding("escape", "app.pop_screen", "Back"), + ] + + def compose(self) -> ComposeResult: + """Create child widgets.""" + yield Header() + yield Container( + Static("[bold cyan]═══ Customer Status ═══[/]\n\n"), + ScrollableContainer( + Static("Loading...", id="status_content"), + id="status_scroll" + ), + Static("\n"), + Button("Refresh", id="btn_refresh", variant="primary"), + ) + yield Footer() + + def on_mount(self) -> None: + """Load status when screen mounts.""" + self.load_status() + + def load_status(self) -> None: + """Load and display status.""" + status_widget = self.query_one("#status_content", Static) + status_widget.update("[yellow]Loading customer data...[/]") + + # Run status loading in background worker + self.run_worker( + self._run_load_status_worker(), + name="load_status_worker", + group="status", + exclusive=True + ) + + async def _run_load_status_worker(self): + """Worker method to load status in 
background.""" + customers = await db.list_customers() + if not customers: + return None + + output = "" + for customer in customers: + status = await orchestrator.get_customer_status(customer.id) + + output += f"[bold cyan]╔═══ {customer.name} ═══╗[/]\n" + output += f"[bold]Customer ID:[/] {customer.id}\n" + output += f"[bold]LinkedIn:[/] {customer.linkedin_url}\n" + output += f"[bold]Company:[/] {customer.company_name or 'N/A'}\n\n" + + output += f"[bold yellow]Status:[/]\n" + output += f" Profile: {'[green]✓ Created[/]' if status['has_profile'] else '[red]✗ Missing[/]'}\n" + output += f" Analysis: {'[green]✓ Complete[/]' if status['has_analysis'] else '[red]✗ Missing[/]'}\n\n" + + output += f"[bold yellow]Content:[/]\n" + output += f" LinkedIn Posts: [cyan]{status['posts_count']}[/]\n" + output += f" Extracted Topics: [cyan]{status['topics_count']}[/]\n" + output += f" Generated Posts: [cyan]{status['generated_posts_count']}[/]\n" + + output += f"[bold cyan]╚{'═' * (len(customer.name) + 8)}╝[/]\n\n" + + return output + + def on_worker_state_changed(self, event: Worker.StateChanged) -> None: + """Handle worker state changes.""" + if event.worker.name != "load_status_worker": + return + + status_widget = self.query_one("#status_content", Static) + + if event.state == WorkerState.SUCCESS: + # Worker completed successfully + output = event.worker.result + if output is None: + status_widget.update( + "[yellow]No customers found.[/]\n" + "[dim]Create a new customer to get started.[/]" + ) + else: + status_widget.update(output) + elif event.state == WorkerState.ERROR: + # Worker failed + error = event.worker.error + logger.exception(f"Failed to load status: {error}") + status_widget.update( + f"[bold red]✗ Error Loading Status[/]\n\n" + f"[red]{str(error)}[/]" + ) + elif event.state == WorkerState.CANCELLED: + # Worker was cancelled + status_widget.update("[yellow]Status loading cancelled[/]") + + def on_button_pressed(self, event: Button.Pressed) -> None: + """Handle button presses.""" + if event.button.id == "btn_refresh": + self.load_status() + + +class LinkedInWorkflowApp(App): + """Main Textual application.""" + + CSS = """ + Screen { + align: center middle; + } + + #menu_container { + width: 60; + height: auto; + padding: 2; + border: solid $primary; + background: $surface; + } + + #menu_container Button { + width: 100%; + margin: 1; + } + + #welcome_text { + text-align: center; + padding: 1; + } + + #form_container { + width: 100%; + height: 100%; + padding: 2; + } + + #form_container Input, #form_container TextArea { + margin-bottom: 1; + } + + #form_container Label { + margin-top: 1; + color: $text; + } + + #form_container TextArea { + height: 5; + } + + #button_row { + width: 100%; + height: auto; + margin: 1 0; + } + + #button_row Button { + margin: 0 1; + } + + #status_container, #results_container, #output_container { + min-height: 10; + border: solid $accent; + margin: 1 0; + padding: 1; + } + + #status_scroll { + height: 30; + border: solid $accent; + margin-top: 1; + padding: 1; + } + + #research_container, #create_container { + width: 90; + height: auto; + padding: 2; + border: solid $primary; + background: $surface; + } + + #customer_list, #topic_list { + height: 10; + border: solid $accent; + margin: 1 0; + } + + #customer_list > .option-list--option, + #topic_list > .option-list--option { + padding: 1 1; + margin-bottom: 1; + } + + #help_text, #help_customer, #help_topic { + color: $text-muted; + margin-bottom: 1; + } + + #progress_container { + height: auto; + padding: 1; 
+ margin: 1 0; + border: solid $accent; + background: $surface-darken-1; + } + + #progress_bar { + width: 100%; + margin: 1 0; + } + + #progress_status { + text-align: center; + margin-bottom: 1; + } + + #iteration_info { + text-align: center; + margin-top: 1; + } + + #title { + text-align: center; + padding: 1; + } + """ + + BINDINGS = [ + Binding("q", "quit", "Quit", show=True), + ] + + def on_mount(self) -> None: + """Set up the application on mount.""" + self.title = "LinkedIn Post Creation System" + self.sub_title = "Multi-Agent AI Workflow" + self.push_screen(WelcomeScreen()) + + +def run_app(): + """Run the TUI application.""" + app = LinkedInWorkflowApp() + app.run() diff --git a/src/web/__init__.py b/src/web/__init__.py new file mode 100644 index 0000000..405abb4 --- /dev/null +++ b/src/web/__init__.py @@ -0,0 +1 @@ +"""Web frontend package.""" diff --git a/src/web/admin/__init__.py b/src/web/admin/__init__.py new file mode 100644 index 0000000..0cefb43 --- /dev/null +++ b/src/web/admin/__init__.py @@ -0,0 +1,4 @@ +"""Admin panel module.""" +from src.web.admin.routes import admin_router + +__all__ = ["admin_router"] diff --git a/src/web/admin/auth.py b/src/web/admin/auth.py new file mode 100644 index 0000000..888759f --- /dev/null +++ b/src/web/admin/auth.py @@ -0,0 +1,32 @@ +"""Admin authentication (password-based).""" +import hashlib +import secrets +from fastapi import Request, HTTPException + +from src.config import settings + +# Authentication +WEB_PASSWORD = settings.web_password +SESSION_SECRET = settings.session_secret or secrets.token_hex(32) +AUTH_COOKIE_NAME = "linkedin_admin_auth" + + +def hash_password(password: str) -> str: + """Hash password with session secret.""" + return hashlib.sha256(f"{password}{SESSION_SECRET}".encode()).hexdigest() + + +def verify_auth(request: Request) -> bool: + """Check if request is authenticated for admin.""" + if not WEB_PASSWORD: + return True # No password set, allow access + cookie = request.cookies.get(AUTH_COOKIE_NAME) + if not cookie: + return False + return cookie == hash_password(WEB_PASSWORD) + + +async def require_auth(request: Request): + """Dependency to require admin authentication.""" + if not verify_auth(request): + raise HTTPException(status_code=302, headers={"Location": "/admin/login"}) diff --git a/src/web/admin/routes.py b/src/web/admin/routes.py new file mode 100644 index 0000000..c2570f7 --- /dev/null +++ b/src/web/admin/routes.py @@ -0,0 +1,693 @@ +"""Admin panel routes (password-protected).""" +import asyncio +import json +from pathlib import Path +from typing import Optional +from uuid import UUID + +from fastapi import APIRouter, Request, Form, BackgroundTasks, HTTPException +from fastapi.templating import Jinja2Templates +from fastapi.responses import HTMLResponse, RedirectResponse +from pydantic import BaseModel +from loguru import logger + +from src.config import settings +from src.database import db +from src.orchestrator import orchestrator +from src.email_service import email_service +from src.web.admin.auth import ( + WEB_PASSWORD, AUTH_COOKIE_NAME, hash_password, verify_auth +) +from src.web.user.auth import UserSession, set_user_session + +# Router with /admin prefix +admin_router = APIRouter(prefix="/admin", tags=["admin"]) + +# Templates +templates = Jinja2Templates(directory=Path(__file__).parent.parent / "templates" / "admin") + +# Store for progress updates +progress_store = {} + + +async def get_customer_profile_picture(customer_id: UUID) -> Optional[str]: + """Get profile picture URL from 
customer's LinkedIn posts.""" + linkedin_posts = await db.get_linkedin_posts(customer_id) + for lp in linkedin_posts: + if lp.raw_data and isinstance(lp.raw_data, dict): + author = lp.raw_data.get("author", {}) + if author and isinstance(author, dict): + profile_picture_url = author.get("profile_picture") + if profile_picture_url: + return profile_picture_url + return None + + +# ==================== AUTH ROUTES ==================== + +@admin_router.get("/login", response_class=HTMLResponse) +async def login_page(request: Request, error: str = None): + """Admin login page.""" + if not WEB_PASSWORD: + return RedirectResponse(url="/admin", status_code=302) + if verify_auth(request): + return RedirectResponse(url="/admin", status_code=302) + return templates.TemplateResponse("login.html", { + "request": request, + "error": error + }) + + +@admin_router.post("/login") +async def login(request: Request, password: str = Form(...)): + """Handle admin login.""" + if password == WEB_PASSWORD: + response = RedirectResponse(url="/admin", status_code=302) + response.set_cookie( + key=AUTH_COOKIE_NAME, + value=hash_password(WEB_PASSWORD), + httponly=True, + max_age=60 * 60 * 24 * 7, + samesite="lax" + ) + return response + return RedirectResponse(url="/admin/login?error=invalid", status_code=302) + + +@admin_router.get("/logout") +async def logout(): + """Handle admin logout.""" + response = RedirectResponse(url="/admin/login", status_code=302) + response.delete_cookie(AUTH_COOKIE_NAME) + return response + + +@admin_router.get("/impersonate/{customer_id}") +async def impersonate_user(request: Request, customer_id: UUID): + """Login as a user without OAuth (for testing). + + Creates a user session for the given customer and redirects to the user dashboard. + Only accessible by authenticated admins. 
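+
+    Editor's note: no OAuth handshake happens here; the handler mints a UserSession directly
+    and stores it via set_user_session, presumably the same cookie the normal login flow sets.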
+ """ + if not verify_auth(request): + return RedirectResponse(url="/admin/login", status_code=302) + + try: + customer = await db.get_customer(customer_id) + if not customer: + raise HTTPException(status_code=404, detail="Customer not found") + + # Extract vanity name from LinkedIn URL if available + linkedin_vanity = "" + if customer.linkedin_url: + import re + match = re.search(r'linkedin\.com/in/([^/?]+)', customer.linkedin_url) + if match: + linkedin_vanity = match.group(1) + + # Get profile picture + profile_picture = await get_customer_profile_picture(customer_id) + + # Create user session + session = UserSession( + customer_id=str(customer.id), + customer_name=customer.name, + linkedin_vanity_name=linkedin_vanity or customer.name.lower().replace(" ", "-"), + linkedin_name=customer.name, + linkedin_picture=profile_picture, + email=customer.email + ) + + # Redirect to user dashboard with session cookie + response = RedirectResponse(url="/", status_code=302) + set_user_session(response, session) + return response + + except HTTPException: + raise + except Exception as e: + logger.error(f"Error impersonating user: {e}") + raise HTTPException(status_code=500, detail=str(e)) + + +# ==================== PAGES ==================== + +@admin_router.get("", response_class=HTMLResponse) +@admin_router.get("/", response_class=HTMLResponse) +async def home(request: Request): + """Admin dashboard.""" + if not verify_auth(request): + return RedirectResponse(url="/admin/login", status_code=302) + try: + customers = await db.list_customers() + total_posts = 0 + for customer in customers: + posts = await db.get_generated_posts(customer.id) + total_posts += len(posts) + + return templates.TemplateResponse("dashboard.html", { + "request": request, + "page": "home", + "customers_count": len(customers), + "total_posts": total_posts + }) + except Exception as e: + logger.error(f"Error loading dashboard: {e}") + return templates.TemplateResponse("dashboard.html", { + "request": request, + "page": "home", + "error": str(e) + }) + + +@admin_router.get("/customers/new", response_class=HTMLResponse) +async def new_customer_page(request: Request): + """New customer setup page.""" + if not verify_auth(request): + return RedirectResponse(url="/admin/login", status_code=302) + return templates.TemplateResponse("new_customer.html", { + "request": request, + "page": "new_customer" + }) + + +@admin_router.get("/research", response_class=HTMLResponse) +async def research_page(request: Request): + """Research topics page.""" + if not verify_auth(request): + return RedirectResponse(url="/admin/login", status_code=302) + customers = await db.list_customers() + return templates.TemplateResponse("research.html", { + "request": request, + "page": "research", + "customers": customers + }) + + +@admin_router.get("/create", response_class=HTMLResponse) +async def create_post_page(request: Request): + """Create post page.""" + if not verify_auth(request): + return RedirectResponse(url="/admin/login", status_code=302) + customers = await db.list_customers() + return templates.TemplateResponse("create_post.html", { + "request": request, + "page": "create", + "customers": customers + }) + + +@admin_router.get("/posts", response_class=HTMLResponse) +async def posts_page(request: Request): + """View all posts page.""" + if not verify_auth(request): + return RedirectResponse(url="/admin/login", status_code=302) + try: + customers = await db.list_customers() + customers_with_posts = [] + + for customer in customers: + posts = await 
db.get_generated_posts(customer.id) + profile_picture = await get_customer_profile_picture(customer.id) + customers_with_posts.append({ + "customer": customer, + "posts": posts, + "post_count": len(posts), + "profile_picture": profile_picture + }) + + return templates.TemplateResponse("posts.html", { + "request": request, + "page": "posts", + "customers_with_posts": customers_with_posts, + "total_posts": sum(c["post_count"] for c in customers_with_posts) + }) + except Exception as e: + logger.error(f"Error loading posts: {e}") + return templates.TemplateResponse("posts.html", { + "request": request, + "page": "posts", + "customers_with_posts": [], + "total_posts": 0, + "error": str(e) + }) + + +@admin_router.get("/posts/{post_id}", response_class=HTMLResponse) +async def post_detail_page(request: Request, post_id: str): + """Detailed view of a single post.""" + if not verify_auth(request): + return RedirectResponse(url="/admin/login", status_code=302) + try: + post = await db.get_generated_post(UUID(post_id)) + if not post: + return RedirectResponse(url="/admin/posts", status_code=302) + + customer = await db.get_customer(post.customer_id) + linkedin_posts = await db.get_linkedin_posts(post.customer_id) + reference_posts = [p.post_text for p in linkedin_posts if p.post_text and len(p.post_text) > 100][:10] + + profile_picture_url = None + for lp in linkedin_posts: + if lp.raw_data and isinstance(lp.raw_data, dict): + author = lp.raw_data.get("author", {}) + if author and isinstance(author, dict): + profile_picture_url = author.get("profile_picture") + if profile_picture_url: + break + + profile_analysis_record = await db.get_profile_analysis(post.customer_id) + profile_analysis = profile_analysis_record.full_analysis if profile_analysis_record else None + + post_type = None + post_type_analysis = None + if post.post_type_id: + post_type = await db.get_post_type(post.post_type_id) + if post_type and post_type.analysis: + post_type_analysis = post_type.analysis + + final_feedback = None + if post.critic_feedback and len(post.critic_feedback) > 0: + final_feedback = post.critic_feedback[-1] + + return templates.TemplateResponse("post_detail.html", { + "request": request, + "page": "posts", + "post": post, + "customer": customer, + "reference_posts": reference_posts, + "profile_analysis": profile_analysis, + "post_type": post_type, + "post_type_analysis": post_type_analysis, + "final_feedback": final_feedback, + "profile_picture_url": profile_picture_url + }) + except Exception as e: + logger.error(f"Error loading post detail: {e}") + return RedirectResponse(url="/admin/posts", status_code=302) + + +@admin_router.get("/status", response_class=HTMLResponse) +async def status_page(request: Request): + """Customer status page.""" + if not verify_auth(request): + return RedirectResponse(url="/admin/login", status_code=302) + try: + customers = await db.list_customers() + customer_statuses = [] + + for customer in customers: + status = await orchestrator.get_customer_status(customer.id) + profile_picture = await get_customer_profile_picture(customer.id) + customer_statuses.append({ + "customer": customer, + "status": status, + "profile_picture": profile_picture + }) + + return templates.TemplateResponse("status.html", { + "request": request, + "page": "status", + "customer_statuses": customer_statuses + }) + except Exception as e: + logger.error(f"Error loading status: {e}") + return templates.TemplateResponse("status.html", { + "request": request, + "page": "status", + "customer_statuses": [], + 
"error": str(e) + }) + + +@admin_router.get("/scraped-posts", response_class=HTMLResponse) +async def scraped_posts_page(request: Request): + """Manage scraped LinkedIn posts.""" + if not verify_auth(request): + return RedirectResponse(url="/admin/login", status_code=302) + customers = await db.list_customers() + return templates.TemplateResponse("scraped_posts.html", { + "request": request, + "page": "scraped_posts", + "customers": customers + }) + + +# ==================== API ENDPOINTS ==================== + +@admin_router.post("/api/customers") +async def create_customer( + background_tasks: BackgroundTasks, + name: str = Form(...), + linkedin_url: str = Form(...), + company_name: str = Form(None), + email: str = Form(None), + persona: str = Form(None), + form_of_address: str = Form(None), + style_guide: str = Form(None), + post_types_json: str = Form(None) +): + """Create a new customer and run initial setup.""" + task_id = f"setup_{name}_{asyncio.get_event_loop().time()}" + progress_store[task_id] = {"status": "starting", "message": "Starte Setup...", "progress": 0} + + customer_data = { + "company_name": company_name, + "email": email, + "persona": persona, + "form_of_address": form_of_address, + "style_guide": style_guide, + "topic_history": [], + "example_posts": [] + } + + post_types_data = None + if post_types_json: + try: + post_types_data = json.loads(post_types_json) + except json.JSONDecodeError: + logger.warning("Failed to parse post_types_json") + + async def run_setup(): + try: + progress_store[task_id] = {"status": "running", "message": "Erstelle Kunde...", "progress": 10} + await asyncio.sleep(0.1) + progress_store[task_id] = {"status": "running", "message": "Scrape LinkedIn Posts...", "progress": 30} + + customer = await orchestrator.run_initial_setup( + linkedin_url=linkedin_url, + customer_name=name, + customer_data=customer_data, + post_types_data=post_types_data + ) + + progress_store[task_id] = { + "status": "completed", + "message": "Setup abgeschlossen!", + "progress": 100, + "customer_id": str(customer.id) + } + except Exception as e: + logger.exception(f"Setup failed: {e}") + progress_store[task_id] = {"status": "error", "message": str(e), "progress": 0} + + background_tasks.add_task(run_setup) + return {"task_id": task_id} + + +@admin_router.get("/api/tasks/{task_id}") +async def get_task_status(task_id: str): + """Get task progress.""" + return progress_store.get(task_id, {"status": "unknown", "message": "Task not found"}) + + +@admin_router.get("/api/customers/{customer_id}/post-types") +async def get_customer_post_types(customer_id: str): + """Get post types for a customer.""" + try: + post_types = await db.get_post_types(UUID(customer_id)) + return { + "post_types": [ + { + "id": str(pt.id), + "name": pt.name, + "description": pt.description, + "identifying_hashtags": pt.identifying_hashtags, + "identifying_keywords": pt.identifying_keywords, + "semantic_properties": pt.semantic_properties, + "has_analysis": pt.analysis is not None, + "analyzed_post_count": pt.analyzed_post_count, + "is_active": pt.is_active + } + for pt in post_types + ] + } + except Exception as e: + logger.error(f"Error loading post types: {e}") + return {"post_types": [], "error": str(e)} + + +@admin_router.get("/api/customers/{customer_id}/linkedin-posts") +async def get_customer_linkedin_posts(customer_id: str): + """Get all scraped LinkedIn posts for a customer.""" + try: + posts = await db.get_linkedin_posts(UUID(customer_id)) + result_posts = [] + for post in posts: + try: + 
result_posts.append({ + "id": str(post.id), + "post_text": post.post_text, + "post_url": post.post_url, + "posted_at": post.post_date.isoformat() if post.post_date else None, + "engagement_score": (post.likes or 0) + (post.comments or 0) + (post.shares or 0), + "likes": post.likes, + "comments": post.comments, + "shares": post.shares, + "post_type_id": str(post.post_type_id) if post.post_type_id else None, + "classification_method": post.classification_method, + "classification_confidence": post.classification_confidence + }) + except Exception as post_error: + logger.error(f"Error processing post {post.id}: {post_error}") + return {"posts": result_posts, "total": len(result_posts)} + except Exception as e: + logger.exception(f"Error loading LinkedIn posts: {e}") + return {"posts": [], "total": 0, "error": str(e)} + + +class ClassifyPostRequest(BaseModel): + post_type_id: Optional[str] = None + + +@admin_router.patch("/api/linkedin-posts/{post_id}/classify") +async def classify_linkedin_post(post_id: str, request: ClassifyPostRequest): + """Manually classify a LinkedIn post.""" + try: + if request.post_type_id: + await db.update_post_classification( + post_id=UUID(post_id), + post_type_id=UUID(request.post_type_id), + classification_method="manual", + classification_confidence=1.0 + ) + else: + await asyncio.to_thread( + lambda: db.client.table("linkedin_posts").update({ + "post_type_id": None, + "classification_method": None, + "classification_confidence": None + }).eq("id", post_id).execute() + ) + return {"success": True, "post_id": post_id} + except Exception as e: + logger.error(f"Error classifying post: {e}") + raise HTTPException(status_code=500, detail=str(e)) + + +@admin_router.post("/api/customers/{customer_id}/classify-posts") +async def classify_customer_posts(customer_id: str, background_tasks: BackgroundTasks): + """Trigger post classification for a customer.""" + task_id = f"classify_{customer_id}_{asyncio.get_event_loop().time()}" + progress_store[task_id] = {"status": "starting", "message": "Starte Klassifizierung...", "progress": 0} + + async def run_classification(): + try: + progress_store[task_id] = {"status": "running", "message": "Klassifiziere Posts...", "progress": 50} + count = await orchestrator.classify_posts(UUID(customer_id)) + progress_store[task_id] = { + "status": "completed", + "message": f"{count} Posts klassifiziert", + "progress": 100, + "classified_count": count + } + except Exception as e: + logger.exception(f"Classification failed: {e}") + progress_store[task_id] = {"status": "error", "message": str(e), "progress": 0} + + background_tasks.add_task(run_classification) + return {"task_id": task_id} + + +@admin_router.post("/api/customers/{customer_id}/analyze-post-types") +async def analyze_customer_post_types(customer_id: str, background_tasks: BackgroundTasks): + """Trigger post type analysis for a customer.""" + task_id = f"analyze_{customer_id}_{asyncio.get_event_loop().time()}" + progress_store[task_id] = {"status": "starting", "message": "Starte Analyse...", "progress": 0} + + async def run_analysis(): + try: + progress_store[task_id] = {"status": "running", "message": "Analysiere Post-Typen...", "progress": 50} + results = await orchestrator.analyze_post_types(UUID(customer_id)) + analyzed_count = sum(1 for r in results.values() if r.get("sufficient_data")) + progress_store[task_id] = { + "status": "completed", + "message": f"{analyzed_count} Post-Typen analysiert", + "progress": 100, + "results": results + } + except Exception as e: + 
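+            # Editor's note: failures in this background task are not re-raised; they are
+            # written into progress_store so the polling endpoint GET /admin/api/tasks/{task_id}
+            # can surface them, mirroring run_setup and run_classification above.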
logger.exception(f"Analysis failed: {e}") + progress_store[task_id] = {"status": "error", "message": str(e), "progress": 0} + + background_tasks.add_task(run_analysis) + return {"task_id": task_id} + + +@admin_router.get("/api/customers/{customer_id}/topics") +async def get_customer_topics(customer_id: str, include_used: bool = False, post_type_id: str = None): + """Get research topics for a customer.""" + try: + if post_type_id: + all_research = await db.get_all_research(UUID(customer_id), UUID(post_type_id)) + else: + all_research = await db.get_all_research(UUID(customer_id)) + + used_topic_titles = set() + if not include_used: + generated_posts = await db.get_generated_posts(UUID(customer_id)) + for post in generated_posts: + if post.topic_title: + used_topic_titles.add(post.topic_title.lower().strip()) + + all_topics = [] + for research in all_research: + if research.suggested_topics: + for topic in research.suggested_topics: + topic_title = topic.get("title", "").lower().strip() + if topic_title in used_topic_titles: + continue + topic["research_id"] = str(research.id) + topic["target_post_type_id"] = str(research.target_post_type_id) if research.target_post_type_id else None + all_topics.append(topic) + + return {"topics": all_topics, "used_count": len(used_topic_titles), "available_count": len(all_topics)} + except Exception as e: + logger.error(f"Error loading topics: {e}") + return {"topics": [], "error": str(e)} + + +@admin_router.post("/api/research") +async def start_research(background_tasks: BackgroundTasks, customer_id: str = Form(...), post_type_id: str = Form(None)): + """Start research for a customer.""" + task_id = f"research_{customer_id}_{asyncio.get_event_loop().time()}" + progress_store[task_id] = {"status": "starting", "message": "Starte Recherche...", "progress": 0} + + async def run_research(): + try: + def progress_callback(message: str, step: int, total: int): + progress_store[task_id] = {"status": "running", "message": message, "progress": int((step / total) * 100)} + + topics = await orchestrator.research_new_topics( + UUID(customer_id), + progress_callback=progress_callback, + post_type_id=UUID(post_type_id) if post_type_id else None + ) + progress_store[task_id] = {"status": "completed", "message": f"{len(topics)} Topics gefunden!", "progress": 100, "topics": topics} + except Exception as e: + logger.exception(f"Research failed: {e}") + progress_store[task_id] = {"status": "error", "message": str(e), "progress": 0} + + background_tasks.add_task(run_research) + return {"task_id": task_id} + + +@admin_router.post("/api/posts") +async def create_post(background_tasks: BackgroundTasks, customer_id: str = Form(...), topic_json: str = Form(...), post_type_id: str = Form(None)): + """Create a new post.""" + task_id = f"post_{customer_id}_{asyncio.get_event_loop().time()}" + progress_store[task_id] = {"status": "starting", "message": "Starte Post-Erstellung...", "progress": 0} + topic = json.loads(topic_json) + + async def run_create_post(): + try: + def progress_callback(message: str, iteration: int, max_iterations: int, score: int = None, versions: list = None, feedback_list: list = None): + progress = int((iteration / max_iterations) * 100) if iteration > 0 else 5 + score_text = f" (Score: {score}/100)" if score else "" + progress_store[task_id] = { + "status": "running", "message": f"{message}{score_text}", "progress": progress, + "iteration": iteration, "max_iterations": max_iterations, + "versions": versions or [], "feedback_list": feedback_list or [] + } + + 
result = await orchestrator.create_post( + customer_id=UUID(customer_id), topic=topic, max_iterations=3, + progress_callback=progress_callback, + post_type_id=UUID(post_type_id) if post_type_id else None + ) + progress_store[task_id] = { + "status": "completed", "message": "Post erstellt!", "progress": 100, + "result": { + "post_id": str(result["post_id"]), "final_post": result["final_post"], + "iterations": result["iterations"], "final_score": result["final_score"], "approved": result["approved"] + } + } + except Exception as e: + logger.exception(f"Post creation failed: {e}") + progress_store[task_id] = {"status": "error", "message": str(e), "progress": 0} + + background_tasks.add_task(run_create_post) + return {"task_id": task_id} + + +@admin_router.get("/api/posts") +async def get_all_posts(): + """Get all posts as JSON.""" + customers = await db.list_customers() + all_posts = [] + for customer in customers: + posts = await db.get_generated_posts(customer.id) + for post in posts: + all_posts.append({ + "id": str(post.id), "customer_name": customer.name, "topic_title": post.topic_title, + "content": post.post_content, "iterations": post.iterations, "status": post.status, + "created_at": post.created_at.isoformat() if post.created_at else None + }) + return {"posts": all_posts, "total": len(all_posts)} + + +class EmailRequest(BaseModel): + recipient: str + post_id: str + + +@admin_router.get("/api/email/config") +async def get_email_config(request: Request): + """Check if email is configured.""" + if not verify_auth(request): + raise HTTPException(status_code=401, detail="Not authenticated") + return {"configured": email_service.is_configured(), "default_recipient": settings.email_default_recipient or ""} + + +@admin_router.post("/api/email/send") +async def send_post_email(request: Request, email_request: EmailRequest): + """Send a post via email.""" + if not verify_auth(request): + raise HTTPException(status_code=401, detail="Not authenticated") + if not email_service.is_configured(): + raise HTTPException(status_code=400, detail="E-Mail ist nicht konfiguriert.") + + try: + post = await db.get_generated_post(UUID(email_request.post_id)) + if not post: + raise HTTPException(status_code=404, detail="Post nicht gefunden") + + customer = await db.get_customer(post.customer_id) + score = None + if post.critic_feedback and len(post.critic_feedback) > 0: + score = post.critic_feedback[-1].get("overall_score") + + success = email_service.send_post( + recipient=email_request.recipient, post_content=post.post_content, + topic_title=post.topic_title or "LinkedIn Post", + customer_name=customer.name if customer else "Unbekannt", score=score + ) + if success: + return {"success": True, "message": f"E-Mail wurde an {email_request.recipient} gesendet"} + else: + raise HTTPException(status_code=500, detail="E-Mail konnte nicht gesendet werden.") + except HTTPException: + raise + except Exception as e: + logger.error(f"Error sending email: {e}") + raise HTTPException(status_code=500, detail=f"Fehler beim Senden: {str(e)}") diff --git a/src/web/app.py b/src/web/app.py new file mode 100644 index 0000000..160dba8 --- /dev/null +++ b/src/web/app.py @@ -0,0 +1,39 @@ +"""FastAPI web frontend for LinkedIn Post Creation System.""" +from pathlib import Path + +from fastapi import FastAPI +from fastapi.staticfiles import StaticFiles +from fastapi.responses import RedirectResponse + +from src.config import settings +from src.web.admin import admin_router + +# Setup +app = FastAPI(title="LinkedIn Post Creation 
System") + +# Static files +app.mount("/static", StaticFiles(directory=Path(__file__).parent / "static"), name="static") + +# Include admin router (always available) +app.include_router(admin_router) + +# Include user router if enabled +if settings.user_frontend_enabled: + from src.web.user import user_router + app.include_router(user_router) +else: + # Root redirect only when user frontend is disabled + @app.get("/") + async def root(): + """Redirect root to admin frontend.""" + return RedirectResponse(url="/admin", status_code=302) + + +def run_web(): + """Run the web server.""" + import uvicorn + uvicorn.run(app, host="0.0.0.0", port=8000) + + +if __name__ == "__main__": + run_web() diff --git a/src/web/static/logo.png b/src/web/static/logo.png new file mode 100644 index 0000000000000000000000000000000000000000..581b3f1b68ea05fd09df50d84f3235575bb9412d GIT binary patch literal 53598 zcmeFYhf`DC_dQHUMZk{Ic~C(SP&%QD6hY~NAOsbVCN&9=gir-+6cqub_f7z*fdmi* z5l!e(2nk3@NFb3CA_U0G=l5s4^PRafcjnHSd+yn1-+k6zd+ir?HWtFi#E)@taS7kH zyyd{f#Y^Gh;*L5ZaQI}hO_)3TM<%sMTP6!uyZ+At|7U^!zgqwxJd0v#28VP)Xo0xz^n*3h0ctpyX_UVD zGL4fjTk8uj@7bG6RMkn_X(Ga|wNJ2i5BzfVzBWvFg}L6tg3!t7;mX5X`9DiO;jegf zxnQXut9l$)l|!Jz#faLr?GKcvOXPsiV4YazL30FhelKF?)#m*J>sita+P0v55@8fn z+hf$M#Dt}nP_PsA4_uzyubJDdc+d{)KyDkw&_SxYS87QJ=jAx41>t!mrwUf#WrzE^ z_EBa$b#Q$95#LTJOUu=uQf!jSsTuS}*+vH$WM)PMqFbl-))eZ)L z2^~lxD%{6iv~+y(GX(Rq8=FoSj0q+WY=t>VEWBI4lT{x-JsTM-r8SMbG>(}nOto>5 z1m%~GcGj@oy^*Q`iO_tj9|%fOS8Wjy*JhPyv4*O(?5^5La<6?|j7uvjeR7$$U5A65 zF(a!o=h||)wcn>lFJ8)tBa2DHS96_IesjQ^%i1AVX{b+)+A&l|k~4lScjw09d0H~I zvtw0uLOL2<5Oz(+Q{ho`c0Zu!USvFCgct5aW=_{f@cm=XlN*5&QdK>WrJK+y zY)Eqqu*pliL~rj%oFs-HuvcK!6SBWGL)zRhWjo(3yf7H{GsB*}Geu_!o&wJWR!O}c=ob6)UnCw+wBgB5|dxy-PTqyTa`8M%0I{11-lvpp4SywP=y4yib zeR3egN#$4}=AuHQbrc3~NO=iD((uzN?yj17Iu+9?ZVYW#gOJHR$@IG5^d=GnI*tnR z**B09^CY$yHSdzp@?}~&-6q9wtXWOzDpEXeLh3+ukgPz5&Iv+(+?USBl`q^+&rkb6 zU3wAc0WRWMLSbl-uC?w-kjq`Xu|f7AAbp##EWUx|GboJxBTCw*#_mbHL*$DKJPr%w(#;I!lA$qfbS7s%EFXrJ-GHuByk0!5-q!|Pz}wSXe_ zW-jdO<^x>lw(CG3DRy7*J*0DbEW&NMT^m7c|NVqnK(Ami3j>DO0GC!m%c?W$!N5+$ zX$lfDeOwmE{PROEtVGc*i)1{hUYpVtd_N%Ph&bnKVQfd=&z`71yS76IzdaGlJp(a+ zh9o#Y?bmu>yB}gZThhYC*knAiXLPu2Zz8pj#Ucmo?%Mxulc-}cs%W&0+rKFHJ657- z|6Jy_lD&f@!!axMNXR1wIB-dqm7>R?6-Lj^7mkJblUL6VM21T0MrerZ*hrSP3Z5x0 z9yd*8=@is(0D@}U*5oowCCC}KfXSiWSB|{Fi4B$%OnY$$lDR;Z;^H|EQLERy$hyB; zinm6dHrnn>Au6)xS)8RbzsRHCb*J)-(z>vdEY8rr(O{=*vrHF^q}u#59kV9Y(;Jv|!;s^}z>pQHBkP0j z^c0h0Ya@L_IYpo@gZ|3E*JLHmT}VAq-eS7at=OlRYh?2 zK9w$wqDI-JtSsR0o>50w!#yo_qS5Y+_G5b@%!DYcwJcklJW+ERM#Hi(0@zJUB~eu>lkOPR0i=Ys* zY1Wzz>GB3-w-VE-M&t~}V%Ox!D!=@NsStqMO0Sc_Q`Y4oU)0o|G;^PYSS0xs5-{Yx z^gY#5PBCs>${mR^hbD&xRjkKES08AG_=bSEeYms-^FiCYti9hoC3OQ(Hl~&V-%Q`D zWXIkczS=5WxI|dbkJg5`v(i5CH9impLb=zhsRqpFG!vHPYmV^>JEp4*+}(zb?5aRi z=-2G`&T#&brXax~Q8QgJ<(TDm#U0hsw>IKgS1g=OJY`iMR;mQ}eFLZ@MLQ-^{6cA^60e%+}>p$4`8zlY;`3f+MCCYal%eMoUXHyY6@MPFxKD9 zXfORn^%oldC@z)&l3v~;%N$TV_svEB zNr<_=uS~fkyfwd4WT0E?p8YZZO7X<+J9e(f7ap`Nv+GpZ0X5F7^??y%-EFX6&(g_) zznsTj@Vp=dD$Z(Z>WzU8zM&?i^|509zTv!k3#(c93NT88{6>aIssC>xlAXoO>JXuw zw*8-F!DjWz@2$j=#}#ld+FTx0p7{xgEQNn6RL|F*gue%*5I*fm7I<4+;Uv<&>Z$Ia zWp;;c_ZYS8#n$%kzf^l-TJNC0M9(;h4`I;LY1^C*>1k!K z1Eza|mT0jIYOFWZv3iXf?!iKlpyb$xP{-Q}DOSCtyP*t$FJ54QqXd4Myq)bY_?i5Y zWxH)d>k4UVopYVIYGqvnBwvpTRhN2da zgQg`LH&+??$Gj^z#R1BV>l^>>x+6if|>HQ z7xcPmr{`yfYD?ay$=wpaFn9_EC=q*=q(cok9C8HA54aRqbb4*zw z>TQ$I(`7P%5MMCK@3sm9y>WNVP#3 zLMt@VN7V;rFijU`MB!wvx$++zPH4qj?zJ<+ARD zOWw~~iMc7Y^iSy4k`KAymekoQcuX9kx|D7r*!{8cr0Av4gZV%i*xH~>Yz}PQL)0k7 z3w&pvNrABfKP>b!X&LyEhkt`FPV8I$$>AnPB~(99c6(X_BFOVE5yZeDNc_;4?z@#% 
diff --git a/src/web/templates/admin/base.html b/src/web/templates/admin/base.html
new file mode 100644
index 0000000..3b9e486
--- /dev/null
+++ b/src/web/templates/admin/base.html
@@ -0,0 +1,105 @@
+
+
+
+
+    {% block title %}Admin - LinkedIn Posts{% endblock %}
+
+
+
+    {% block head %}{% endblock %}
+
+
+
+
+
+
+
+ {% block content %}{% endblock %} +
+
+ + {% block scripts %}{% endblock %} + + diff --git a/src/web/templates/admin/create_post.html b/src/web/templates/admin/create_post.html new file mode 100644 index 0000000..ee3bf58 --- /dev/null +++ b/src/web/templates/admin/create_post.html @@ -0,0 +1,539 @@ +{% extends "base.html" %} +{% block title %}Post erstellen - LinkedIn Posts{% endblock %} + +{% block content %} +
+

Post erstellen

+

Generiere einen neuen LinkedIn Post mit AI

+
+ +
+ +
+
+ +
+ + +
+ + + + + + + + +
+ +
+ + + +
+
+ + + + + +
+ + {% if not customers %} +
+

Noch keine Kunden vorhanden. Erstelle zuerst einen Kunden.

+
+ {% endif %} +
+ + +
+
+

Generierter Post

+ + + + +
+

Wähle einen Kunden und ein Topic, dann klicke auf "Post generieren"...

+
+
+
+
+{% endblock %} + +{% block scripts %} + +{% endblock %} diff --git a/src/web/templates/admin/dashboard.html b/src/web/templates/admin/dashboard.html new file mode 100644 index 0000000..f106474 --- /dev/null +++ b/src/web/templates/admin/dashboard.html @@ -0,0 +1,97 @@ +{% extends "base.html" %} +{% block title %}Dashboard - LinkedIn Posts{% endblock %} + +{% block content %} +
+

Dashboard

+

Willkommen zum LinkedIn Post Creation System

+
+ +{% if error %} +
+ Error: {{ error }} +
+{% endif %} + + +
+
+
+
+ +
+
+

Kunden

+

{{ customers_count or 0 }}

+
+
+
+ +
+
+
+ +
+
+

Generierte Posts

+

{{ total_posts or 0 }}

+
+
+
+ +
+
+
+ +
+
+

AI Agents

+

5

+
+
+
+
+ + + +{% endblock %} diff --git a/src/web/templates/admin/login.html b/src/web/templates/admin/login.html new file mode 100644 index 0000000..1626ac6 --- /dev/null +++ b/src/web/templates/admin/login.html @@ -0,0 +1,72 @@ + + + + + + Login - LinkedIn Posts + + + + + +
+
+
+ Logo +

LinkedIn Posts

+

Admin Panel

+
+ + {% if error %} +
+ Falsches Passwort. Bitte versuche es erneut. +
+ {% endif %} + +
+
+ + +
+ + +
+
+
+ + diff --git a/src/web/templates/admin/new_customer.html b/src/web/templates/admin/new_customer.html new file mode 100644 index 0000000..dc4ca02 --- /dev/null +++ b/src/web/templates/admin/new_customer.html @@ -0,0 +1,274 @@ +{% extends "base.html" %} +{% block title %}Neuer Kunde - LinkedIn Posts{% endblock %} + +{% block content %} +
+

Neuer Kunde

+

Richte einen neuen Kunden ein und starte das initiale Setup

+
+ +
+
+ +
+

Basis-Informationen

+
+
+ + +
+
+ + +
+
+
+ + +
+
+ + +
+
+
+
+ + +
+

Persona & Stil

+
+
+ + +
+
+ + +
+
+ + +
+
+
+ + +
+
+

Post-Typen

+ +
+

Definiere verschiedene Arten von Posts (z.B. "Thought Leader", "Case Study", "How-To"). Diese werden zur Kategorisierung und typ-spezifischen Analyse verwendet.

+ +
+ +
+
+ + + + + + + + +
+ +
+
+
+{% endblock %} + +{% block scripts %} + +{% endblock %} diff --git a/src/web/templates/admin/post_detail.html b/src/web/templates/admin/post_detail.html new file mode 100644 index 0000000..81ef2cb --- /dev/null +++ b/src/web/templates/admin/post_detail.html @@ -0,0 +1,1481 @@ +{% extends "base.html" %} +{% block title %}{{ post.topic_title }} - Post Details{% endblock %} + +{% block head %} + +{% endblock %} + +{% block content %} + +
+ + + Zurück zu allen Posts + +
+
+

+ {{ post.topic_title or 'Untitled Post' }} +

+
+ + + {{ customer.name }} + + | + {{ post.created_at.strftime('%d.%m.%Y um %H:%M Uhr') if post.created_at else 'N/A' }} + | + {{ post.iterations }} Iteration{{ 's' if post.iterations != 1 else '' }} +
+
+
+ + {{ post.status | capitalize }} + + {% if final_feedback %} + + Score: {{ final_feedback.overall_score }}/100 + + {% endif %} +
+
+
+ + +
+ +
+ + +
+
+ +
+
+
+

+ + Finaler Post +

+
+ +
+ + +
+ +
+
+ + +
+
+
+ {% if profile_picture_url %} + {{ customer.name }} + {% else %} + {{ customer.name[:2] | upper if customer.name else 'UN' }} + {% endif %} +
+ +
+ + + +
+
+ +
...mehr anzeigen
+
+ + + + + + + 42 + 12 Kommentare • 3 Reposts +
+
+ + + + +
+
+ + + +
+
+ + +
+ + {% if final_feedback and final_feedback.scores %} +
+

+ + Score-Aufschlüsselung +

+
+
+
+ Authentizität & Stil + {{ final_feedback.scores.authenticity_and_style }}/40 +
+
+
+
+
+
+
+ Content-Qualität + {{ final_feedback.scores.content_quality }}/35 +
+
+
+
+
+
+
+ Technische Umsetzung + {{ final_feedback.scores.technical_execution }}/25 +
+
+
+
+
+
+
+ Gesamt + {{ final_feedback.overall_score }}/100 +
+
+
+
+ {% endif %} + + + {% if final_feedback %} +
+

+ + Finales Feedback +

+

{{ final_feedback.feedback }}

+ {% if final_feedback.strengths %} +
+ Stärken +
    + {% for s in final_feedback.strengths %} +
  • + {{ s }}
  • + {% endfor %} +
+
+ {% endif %} +
+ {% endif %} + + +
+

Aktionen

+
+ + + + + Neuen Post erstellen + +
+
+
+
+
+ + + {% if post.writer_versions and post.writer_versions | length > 0 %} +
+
+
+

+ + Iterationen +

+ +
+ + + 1 / {{ post.writer_versions | length }} + + +
+
+ + + {% for i in range(post.writer_versions | length) %} +
+
+ Version {{ i + 1 }} + {% if post.critic_feedback and i < post.critic_feedback | length %} +
+ + Score: {{ post.critic_feedback[i].overall_score }}/100 + + {% if post.critic_feedback[i].approved %} + Approved + {% endif %} +
+ {% endif %} +
+ +
+
{{ post.writer_versions[i] }}
+
+ + {% if post.critic_feedback and i < post.critic_feedback | length %} + {% set fb = post.critic_feedback[i] %} +
+

+ + Critic Feedback +

+

{{ fb.feedback }}

+
+ {% if fb.strengths %} +
+ Stärken +
    + {% for s in fb.strengths %} +
  • + {{ s }}
  • + {% endfor %} +
+
+ {% endif %} + {% if fb.improvements %} +
+ Verbesserungen +
    + {% for imp in fb.improvements %} +
  • - {{ imp }}
  • + {% endfor %} +
+
+ {% endif %} +
+
+ {% endif %} +
+ {% endfor %} +
+
+ {% endif %} + + + {% if reference_posts %} +
+
+

+ + Referenz-Posts für KI + ({{ reference_posts | length }} Posts) +

+

Diese echten LinkedIn-Posts wurden der KI als Stil-Referenz gegeben:

+ +
+ {% for ref_post in reference_posts %} +
+
+ + Beispiel {{ loop.index }} + + {{ ref_post | length }} Zeichen +
+
{{ ref_post[:500] }}{% if ref_post | length > 500 %}...{% endif %}
+
+ {% endfor %} +
+
+
+ {% endif %} + + + {% if profile_analysis %} +
+
+

+ + Profil-Analyse +

+ + + {% if profile_analysis.writing_style %} +
+

Schreibstil

+
+
+ Perspektive + {{ profile_analysis.writing_style.perspective or 'N/A' }} +
+
+ Ansprache + {{ profile_analysis.writing_style.form_of_address or 'N/A' }} +
+
+ Tonalität + {{ profile_analysis.writing_style.tone or 'N/A' }} +
+ {% if profile_analysis.writing_style.average_word_count %} +
+ Ø Wortanzahl + {{ profile_analysis.writing_style.average_word_count }} Wörter +
+ {% endif %} +
+
+ {% endif %} + + + {% if profile_analysis.linguistic_fingerprint %} +
+

Sprachlicher Fingerabdruck

+
+
+ Energie-Level +
+
+
+
+ {{ profile_analysis.linguistic_fingerprint.energy_level or 'N/A' }}/10 +
+
+ {% if profile_analysis.linguistic_fingerprint.formality_level %} +
+ Formalität +
+
+
+
+ {{ profile_analysis.linguistic_fingerprint.formality_level }}/10 +
+
+ {% endif %} +
+ + {% if profile_analysis.linguistic_fingerprint.signature_phrases %} +
+ Signature Phrases +
+ {% for phrase in profile_analysis.linguistic_fingerprint.signature_phrases %} + {{ phrase }} + {% endfor %} +
+
+ {% endif %} +
+ {% endif %} + + + {% if profile_analysis.phrase_library %} +
+

Phrasen-Bibliothek

+
+ {% if profile_analysis.phrase_library.hook_phrases %} +
+ Hook-Phrasen +
+ {% for hook in profile_analysis.phrase_library.hook_phrases[:4] %} +

"{{ hook }}"

+ {% endfor %} +
+
+ {% endif %} + + {% if profile_analysis.phrase_library.emotional_expressions %} +
+ Emotionale Ausdrücke +
+ {% for expr in profile_analysis.phrase_library.emotional_expressions[:6] %} + {{ expr }} + {% endfor %} +
+
+ {% endif %} + + {% if profile_analysis.phrase_library.cta_phrases %} +
+ CTA-Phrasen +
+ {% for cta in profile_analysis.phrase_library.cta_phrases[:3] %} +

"{{ cta }}"

+ {% endfor %} +
+
+ {% endif %} +
+
+ {% endif %} + + + {% if profile_analysis.tone_analysis %} +
+

Ton-Analyse

+
+ {% if profile_analysis.tone_analysis.primary_tone %} +
+ Primärer Ton + {{ profile_analysis.tone_analysis.primary_tone }} +
+ {% endif %} + {% if profile_analysis.tone_analysis.secondary_tones %} +
+ Sekundäre Töne + {{ profile_analysis.tone_analysis.secondary_tones | join(', ') }} +
+ {% endif %} +
+
+ {% endif %} + + + {% if profile_analysis.audience_insights %} +
+

Zielgruppen-Insights

+
+ {% if profile_analysis.audience_insights.industry_context %} +
+ Branche + {{ profile_analysis.audience_insights.industry_context }} +
+ {% endif %} + {% if profile_analysis.audience_insights.target_audience %} +
+ Zielgruppe + {{ profile_analysis.audience_insights.target_audience }} +
+ {% endif %} +
+ + {% if profile_analysis.audience_insights.pain_points_addressed %} +
+ Adressierte Pain Points +
+ {% for pain in profile_analysis.audience_insights.pain_points_addressed %} + {{ pain }} + {% endfor %} +
+
+ {% endif %} +
+ {% endif %} +
+
+ {% endif %} + + + {% if post_type_analysis %} +
+
+
+

+ + Post-Typ Analyse +

+ {% if post_type %} + + {{ post_type.name }} + + {% endif %} +
+ + {% if post_type_analysis.post_count %} +

Basierend auf {{ post_type_analysis.post_count }} analysierten Posts dieses Typs

+ {% endif %} + + + {% if post_type_analysis.structure_patterns %} +
+

Struktur-Muster

+
+ {% if post_type_analysis.structure_patterns.typical_structure %} +
+ Typische Struktur + {{ post_type_analysis.structure_patterns.typical_structure }} +
+ {% endif %} + {% if post_type_analysis.structure_patterns.paragraph_count %} +
+ Absätze + {{ post_type_analysis.structure_patterns.paragraph_count }} +
+ {% endif %} + {% if post_type_analysis.structure_patterns.paragraph_length %} +
+ Absatzlänge + {{ post_type_analysis.structure_patterns.paragraph_length }} +
+ {% endif %} + {% if post_type_analysis.structure_patterns.uses_lists is defined %} +
+ Listen + {{ 'Ja' if post_type_analysis.structure_patterns.uses_lists else 'Nein' }} +
+ {% endif %} + {% if post_type_analysis.structure_patterns.list_style %} +
+ Listen-Stil + {{ post_type_analysis.structure_patterns.list_style }} +
+ {% endif %} +
+ {% if post_type_analysis.structure_patterns.structure_template %} +
+ Struktur-Vorlage +
{{ post_type_analysis.structure_patterns.structure_template }}
+
+ {% endif %} +
+ {% endif %} + + + {% if post_type_analysis.language_style %} +
+

Sprachstil

+
+ {% if post_type_analysis.language_style.tone %} +
+ Tonalität + {{ post_type_analysis.language_style.tone }} +
+ {% endif %} + {% if post_type_analysis.language_style.perspective %} +
+ Perspektive + {{ post_type_analysis.language_style.perspective }} +
+ {% endif %} + {% if post_type_analysis.language_style.energy_level %} +
+ Energie-Level +
+
+
+
+ {{ post_type_analysis.language_style.energy_level }}/10 +
+
+ {% endif %} + {% if post_type_analysis.language_style.formality %} +
+ Formalität + {{ post_type_analysis.language_style.formality }} +
+ {% endif %} + {% if post_type_analysis.language_style.sentence_types %} +
+ Satz-Typen + {{ post_type_analysis.language_style.sentence_types }} +
+ {% endif %} +
+ + {% if post_type_analysis.language_style.typical_sentence_starters %} +
+ Typische Satzanfänge +
+ {% for starter in post_type_analysis.language_style.typical_sentence_starters[:8] %} + "{{ starter }}" + {% endfor %} +
+
+ {% endif %} + + {% if post_type_analysis.language_style.signature_phrases %} +
+ Signature Phrases +
+ {% for phrase in post_type_analysis.language_style.signature_phrases[:5] %} +

"{{ phrase }}"

+ {% endfor %} +
+
+ {% endif %} +
+ {% endif %} + + + {% if post_type_analysis.hooks %} +
+

Hook-Muster

+ {% if post_type_analysis.hooks.hook_types %} +
+ {% for hook_type in post_type_analysis.hooks.hook_types %} + {{ hook_type }} + {% endfor %} +
+ {% endif %} + + {% if post_type_analysis.hooks.real_examples %} +
+ {% for example in post_type_analysis.hooks.real_examples[:4] %} +
+
+ {{ example.type or 'Hook' }} + {% if example.why_effective %} + {{ example.why_effective }} + {% endif %} +
+

"{{ example.hook }}"

+
+ {% endfor %} +
+ {% endif %} + + {% if post_type_analysis.hooks.average_hook_length %} +
+ Durchschnittliche Hook-Länge: {{ post_type_analysis.hooks.average_hook_length }} +
+ {% endif %} +
+ {% endif %} + + + {% if post_type_analysis.ctas %} +
+

CTA-Muster

+ {% if post_type_analysis.ctas.cta_types %} +
+ {% for cta_type in post_type_analysis.ctas.cta_types %} + {{ cta_type }} + {% endfor %} +
+ {% endif %} + + {% if post_type_analysis.ctas.real_examples %} +
+ {% for example in post_type_analysis.ctas.real_examples[:4] %} +
+ {{ example.type or 'CTA' }} +

"{{ example.cta }}"

+
+ {% endfor %} +
+ {% endif %} + +
+ {% if post_type_analysis.ctas.cta_position %} +
+ Position + {{ post_type_analysis.ctas.cta_position }} +
+ {% endif %} + {% if post_type_analysis.ctas.cta_intensity %} +
+ Intensität + {{ post_type_analysis.ctas.cta_intensity }} +
+ {% endif %} +
+
+ {% endif %} + + + {% if post_type_analysis.visual_patterns %} +
+

Visuelle Elemente

+
+ {% if post_type_analysis.visual_patterns.emoji_usage %} +
+ Emoji-Häufigkeit + {{ post_type_analysis.visual_patterns.emoji_usage.frequency or 'mittel' }} +
+ {% if post_type_analysis.visual_patterns.emoji_usage.typical_emojis %} +
+ Typische Emojis + {{ post_type_analysis.visual_patterns.emoji_usage.typical_emojis | join(' ') }} +
+ {% endif %} + {% if post_type_analysis.visual_patterns.emoji_usage.placement %} +
+ Platzierung + {{ post_type_analysis.visual_patterns.emoji_usage.placement }} +
+ {% endif %} + {% endif %} + {% if post_type_analysis.visual_patterns.formatting %} +
+ Formatierung + {{ post_type_analysis.visual_patterns.formatting }} +
+ {% endif %} + {% if post_type_analysis.visual_patterns.whitespace %} +
+ Whitespace + {{ post_type_analysis.visual_patterns.whitespace }} +
+ {% endif %} +
+
+ {% endif %} + + + {% if post_type_analysis.length_patterns %} +
+

Längen-Muster

+
+ {% if post_type_analysis.length_patterns.ideal_length %} +
+ Ideale Länge + {{ post_type_analysis.length_patterns.ideal_length }} +
+ {% endif %} + {% if post_type_analysis.length_patterns.average_words %} +
+ Durchschnitt + {{ post_type_analysis.length_patterns.average_words }} Wörter +
+ {% endif %} + {% if post_type_analysis.length_patterns.range %} +
+ Range + {{ post_type_analysis.length_patterns.range }} Wörter +
+ {% endif %} +
+
+ {% endif %} + + + {% if post_type_analysis.content_focus %} +
+

Inhaltlicher Fokus

+ {% if post_type_analysis.content_focus.main_themes %} +
+ {% for theme in post_type_analysis.content_focus.main_themes %} + {{ theme }} + {% endfor %} +
+ {% endif %} +
+ {% if post_type_analysis.content_focus.value_proposition %} +
+ Mehrwert + {{ post_type_analysis.content_focus.value_proposition }} +
+ {% endif %} + {% if post_type_analysis.content_focus.target_emotion %} +
+ Ziel-Emotion + {{ post_type_analysis.content_focus.target_emotion }} +
+ {% endif %} +
+
+ {% endif %} + + + {% if post_type_analysis.recurring_elements %} +
+

Wiederkehrende Elemente

+ {% if post_type_analysis.recurring_elements.phrases %} +
+ Wiederkehrende Phrasen +
+ {% for phrase in post_type_analysis.recurring_elements.phrases[:8] %} + "{{ phrase }}" + {% endfor %} +
+
+ {% endif %} + {% if post_type_analysis.recurring_elements.transitions %} +
+ Typische Übergänge +
+ {% for transition in post_type_analysis.recurring_elements.transitions[:4] %} +

"{{ transition }}"

+ {% endfor %} +
+
+ {% endif %} + {% if post_type_analysis.recurring_elements.closings %} +
+ Schlussformulierungen +
+ {% for closing in post_type_analysis.recurring_elements.closings[:4] %} +

"{{ closing }}"

+ {% endfor %} +
+
+ {% endif %} +
+ {% endif %} + + + {% if post_type_analysis.writing_guidelines %} +
+

Schreib-Richtlinien

+
+ {% if post_type_analysis.writing_guidelines.dos %} +
+ DO +
    + {% for do in post_type_analysis.writing_guidelines.dos %} +
  • + + {{ do }} +
  • + {% endfor %} +
+
+ {% endif %} + {% if post_type_analysis.writing_guidelines.donts %} +
+ DON'T +
    + {% for dont in post_type_analysis.writing_guidelines.donts %} +
  • + + {{ dont }} +
  • + {% endfor %} +
+
+ {% endif %} +
+ {% if post_type_analysis.writing_guidelines.key_success_factors %} +
+ Erfolgsfaktoren +
    + {% for factor in post_type_analysis.writing_guidelines.key_success_factors %} +
  • + + {{ factor }} +
  • + {% endfor %} +
+
+ {% endif %} +
+ {% endif %} +
+
+ {% endif %} + + + +{% endblock %} + +{% block scripts %} + +{% endblock %} diff --git a/src/web/templates/admin/posts.html b/src/web/templates/admin/posts.html new file mode 100644 index 0000000..65dc490 --- /dev/null +++ b/src/web/templates/admin/posts.html @@ -0,0 +1,152 @@ +{% extends "base.html" %} +{% block title %}Alle Posts - LinkedIn Posts{% endblock %} + +{% block head %} + +{% endblock %} + +{% block content %} +
+
+

Alle Posts

+

{{ total_posts }} generierte Posts

+
+ + + Neuer Post + +
+ +{% if error %} +
+ Error: {{ error }} +
+{% endif %} + +{% if customers_with_posts %} +
+ {% for item in customers_with_posts %} + {% if item.posts %} + + {% endif %} + {% endfor %} +
+{% else %} +
+
+ +
+

Noch keine Posts

+

Erstelle deinen ersten LinkedIn Post mit KI-Unterstützung.

+ + + Post erstellen + +
+{% endif %} +{% endblock %} diff --git a/src/web/templates/admin/research.html b/src/web/templates/admin/research.html new file mode 100644 index 0000000..ce31199 --- /dev/null +++ b/src/web/templates/admin/research.html @@ -0,0 +1,215 @@ +{% extends "base.html" %} +{% block title %}Research Topics - LinkedIn Posts{% endblock %} + +{% block content %} +
+

Research Topics

+

Recherchiere neue Content-Themen mit Perplexity AI

+
+ +
+ +
+
+
+ + +
+ + + + + + + + +
+ + {% if not customers %} +
+

Noch keine Kunden vorhanden. Erstelle zuerst einen Kunden.

+
+ {% endif %} +
+ + +
+
+

Gefundene Topics

+
+

Starte eine Recherche um Topics zu finden...

+
+
+
+
+{% endblock %} + +{% block scripts %} + +{% endblock %} diff --git a/src/web/templates/admin/scraped_posts.html b/src/web/templates/admin/scraped_posts.html new file mode 100644 index 0000000..72acc01 --- /dev/null +++ b/src/web/templates/admin/scraped_posts.html @@ -0,0 +1,571 @@ +{% extends "base.html" %} +{% block title %}Gescrapte Posts - LinkedIn Posts{% endblock %} + +{% block head %} + +{% endblock %} + +{% block content %} +
+
+
+

Gescrapte Posts verwalten

+

Posts manuell kategorisieren und Post-Typ-Analyse triggern

+
+
+
+ + +
+
+
+ + +
+
+ + +
+
+
+ + + + + + + + + + + + + +{% if not customers %} +
+

Noch keine Kunden vorhanden. Erstelle zuerst einen Kunden.

+
+{% endif %} + + + +{% endblock %} + +{% block scripts %} + +{% endblock %} diff --git a/src/web/templates/admin/status.html b/src/web/templates/admin/status.html new file mode 100644 index 0000000..2458d58 --- /dev/null +++ b/src/web/templates/admin/status.html @@ -0,0 +1,159 @@ +{% extends "base.html" %} +{% block title %}Status - LinkedIn Posts{% endblock %} + +{% block content %} +
+

Status

+

Übersicht über alle Kunden und deren Setup-Status

+
+ +{% if error %} +
+ Error: {{ error }} +
+{% endif %} + +{% if customer_statuses %} +
+ {% for item in customer_statuses %} +
+ +
+
+
+
+ {% if item.profile_picture %} + {{ item.customer.name }} + {% else %} + {{ item.customer.name[0] | upper }} + {% endif %} +
+
+

{{ item.customer.name }}

+

{{ item.customer.company_name or 'Kein Unternehmen' }}

+
+
+
+ {% if item.status.ready_for_posts %} + + + Bereit für Posts + + {% else %} + + + Setup unvollständig + + {% endif %} +
+
+
+ + +
+
+ +
+
+ {% if item.status.has_scraped_posts %} + + {% else %} + + {% endif %} + Scraped Posts +
+

{{ item.status.scraped_posts_count }}

+
+ + +
+
+ {% if item.status.has_profile_analysis %} + + {% else %} + + {% endif %} + Profil Analyse +
+

{{ 'Vorhanden' if item.status.has_profile_analysis else 'Fehlt' }}

+
+ + +
+
+ {% if item.status.research_count > 0 %} + + {% else %} + + {% endif %} + Research Topics +
+

{{ item.status.research_count }}

+
+ + +
+
+ + Generierte Posts +
+

{{ item.status.posts_count }}

+
+
+ + + {% if item.status.missing_items %} +
+

+ + Fehlende Elemente +

+
    + {% for item_missing in item.status.missing_items %} +
  • + + {{ item_missing }} +
  • + {% endfor %} +
+
+ {% endif %} + + +
+ {% if not item.status.has_profile_analysis %} + + Setup wiederholen + + {% endif %} + {% if item.status.research_count == 0 %} + + Recherche starten + + {% endif %} + {% if item.status.ready_for_posts %} + + Post erstellen + + {% endif %} + + + Als User einloggen + +
+
+
+ {% endfor %} +
+{% else %} +
+ +

Noch keine Kunden

+

Erstelle deinen ersten Kunden, um den Status zu sehen.

+ + + Neuer Kunde + +
+{% endif %} +{% endblock %} diff --git a/src/web/templates/base.html b/src/web/templates/base.html new file mode 100644 index 0000000..a908273 --- /dev/null +++ b/src/web/templates/base.html @@ -0,0 +1,103 @@ + + + + + + {% block title %}LinkedIn Posts{% endblock %} + + + + {% block head %}{% endblock %} + + + + + + +
+
+ {% block content %}{% endblock %} +
+
+ + {% block scripts %}{% endblock %} + + diff --git a/src/web/templates/create_post.html b/src/web/templates/create_post.html new file mode 100644 index 0000000..5f7630d --- /dev/null +++ b/src/web/templates/create_post.html @@ -0,0 +1,539 @@ +{% extends "base.html" %} +{% block title %}Post erstellen - LinkedIn Posts{% endblock %} + +{% block content %} +
+

Post erstellen

+

Generiere einen neuen LinkedIn Post mit AI

+
+ +
+ +
+
+ +
+ + +
+ + + + + + + + +
+ +
+ + + +
+
+ + + + + +
+ + {% if not customers %} +
+

Noch keine Kunden vorhanden. Erstelle zuerst einen Kunden.

+
+ {% endif %} +
+ + +
+
+

Generierter Post

+ + + + +
+

Wähle einen Kunden und ein Topic, dann klicke auf "Post generieren"...

+
+
+
+
+{% endblock %} + +{% block scripts %} + +{% endblock %} diff --git a/src/web/templates/dashboard.html b/src/web/templates/dashboard.html new file mode 100644 index 0000000..a314c83 --- /dev/null +++ b/src/web/templates/dashboard.html @@ -0,0 +1,97 @@ +{% extends "base.html" %} +{% block title %}Dashboard - LinkedIn Posts{% endblock %} + +{% block content %} +
+

Dashboard

+

Willkommen zum LinkedIn Post Creation System

+
+ +{% if error %} +
+ Error: {{ error }} +
+{% endif %} + + +
+
+
+
+ +
+
+

Kunden

+

{{ customers_count or 0 }}

+
+
+
+ +
+
+
+ +
+
+

Generierte Posts

+

{{ total_posts or 0 }}

+
+
+
+ +
+
+
+ +
+
+

AI Agents

+

5

+
+
+
+
+ + + +{% endblock %} diff --git a/src/web/templates/login.html b/src/web/templates/login.html new file mode 100644 index 0000000..3398c88 --- /dev/null +++ b/src/web/templates/login.html @@ -0,0 +1,72 @@ + + + + + + Login - LinkedIn Posts + + + + + +
+
+
+ Logo +

LinkedIn Posts

+

AI Workflow System

+
+ + {% if error %} +
+ Falsches Passwort. Bitte versuche es erneut. +
+ {% endif %} + +
+
+ + +
+ + +
+
+
+ + diff --git a/src/web/templates/new_customer.html b/src/web/templates/new_customer.html new file mode 100644 index 0000000..7226155 --- /dev/null +++ b/src/web/templates/new_customer.html @@ -0,0 +1,274 @@ +{% extends "base.html" %} +{% block title %}Neuer Kunde - LinkedIn Posts{% endblock %} + +{% block content %} +
+

Neuer Kunde

+

Richte einen neuen Kunden ein und starte das initiale Setup

+
+ +
+
+ +
+

Basis-Informationen

+
+
+ + +
+
+ + +
+
+
+ + +
+
+ + +
+
+
+
+ + +
+

Persona & Stil

+
+
+ + +
+
+ + +
+
+ + +
+
+
+ + +
+
+

Post-Typen

+ +
+

Definiere verschiedene Arten von Posts (z.B. "Thought Leader", "Case Study", "How-To"). Diese werden zur Kategorisierung und typ-spezifischen Analyse verwendet.

+ +
+ +
+
+ + + + + + + + +
+ +
+
+
+{% endblock %} + +{% block scripts %} + +{% endblock %} diff --git a/src/web/templates/post_detail.html b/src/web/templates/post_detail.html new file mode 100644 index 0000000..041d2d7 --- /dev/null +++ b/src/web/templates/post_detail.html @@ -0,0 +1,1481 @@ +{% extends "base.html" %} +{% block title %}{{ post.topic_title }} - Post Details{% endblock %} + +{% block head %} + +{% endblock %} + +{% block content %} + +
+ + + Zurück zu allen Posts + +
+
+

+ {{ post.topic_title or 'Untitled Post' }} +

+
+ {{ customer.name }}
+ |
+ {{ post.created_at.strftime('%d.%m.%Y um %H:%M Uhr') if post.created_at else 'N/A' }}
+ |
+ {{ post.iterations }} Iteration{{ 'en' if post.iterations != 1 else '' }}
+
+
+ + {{ post.status | capitalize }} + + {% if final_feedback %} + + Score: {{ final_feedback.overall_score }}/100 + + {% endif %} +
+
+
+ + +
+ +
+ + +
+
+ +
+
+
+

+ + Finaler Post +

+
+ +
+ + +
+ +
+
+ + +
+
+
+ {% if profile_picture_url %} + {{ customer.name }} + {% else %} + {{ customer.name[:2] | upper if customer.name else 'UN' }} + {% endif %} +
+ +
+ + + +
+
+ +
...mehr anzeigen
+
+ + + + + + + 42 + 12 Kommentare • 3 Reposts +
+
+ + + + +
+
+ + + +
+
+ + +
+ + {% if final_feedback and final_feedback.scores %} +
+

+ + Score-Aufschlüsselung +

+
+
+
+ Authentizität & Stil + {{ final_feedback.scores.authenticity_and_style }}/40 +
+
+
+
+
+
+
+ Content-Qualität + {{ final_feedback.scores.content_quality }}/35 +
+
+
+
+
+
+
+ Technische Umsetzung + {{ final_feedback.scores.technical_execution }}/25 +
+
+
+
+
+
+
+ Gesamt + {{ final_feedback.overall_score }}/100 +
+
+
+
+ {% endif %} + + + {% if final_feedback %} +
+

+ + Finales Feedback +

+

{{ final_feedback.feedback }}

+ {% if final_feedback.strengths %} +
+ Stärken +
    + {% for s in final_feedback.strengths %} +
  • + {{ s }}
  • + {% endfor %} +
+
+ {% endif %} +
+ {% endif %} + + +
+

Aktionen

+
+ + + + + Neuen Post erstellen + +
+
+
+
+
+ + + {% if post.writer_versions and post.writer_versions | length > 0 %} +
+
+
+

+ + Iterationen +

+ +
+ + + 1 / {{ post.writer_versions | length }} + + +
+
+ + + {% for i in range(post.writer_versions | length) %} +
+
+ Version {{ i + 1 }} + {% if post.critic_feedback and i < post.critic_feedback | length %} +
+ + Score: {{ post.critic_feedback[i].overall_score }}/100 + + {% if post.critic_feedback[i].approved %} + Approved + {% endif %} +
+ {% endif %} +
+ +
+
{{ post.writer_versions[i] }}
+
+ + {% if post.critic_feedback and i < post.critic_feedback | length %} + {% set fb = post.critic_feedback[i] %} +
+

+ + Critic Feedback +

+

{{ fb.feedback }}

+
+ {% if fb.strengths %} +
+ Stärken +
    + {% for s in fb.strengths %} +
  • + {{ s }}
  • + {% endfor %} +
+
+ {% endif %} + {% if fb.improvements %} +
+ Verbesserungen +
    + {% for imp in fb.improvements %} +
  • - {{ imp }}
  • + {% endfor %} +
+
+ {% endif %} +
+
+ {% endif %} +
+ {% endfor %} +
+
+ {% endif %} + + + {% if reference_posts %} +
+
+

+ + Referenz-Posts für KI + ({{ reference_posts | length }} Posts) +

+

Diese echten LinkedIn-Posts wurden der KI als Stil-Referenz gegeben:

+ +
+ {% for ref_post in reference_posts %} +
+
+ + Beispiel {{ loop.index }} + + {{ ref_post | length }} Zeichen +
+
{{ ref_post[:500] }}{% if ref_post | length > 500 %}...{% endif %}
+
+ {% endfor %} +
+
+
+ {% endif %} + + + {% if profile_analysis %} +
+
+

+ + Profil-Analyse +

+ + + {% if profile_analysis.writing_style %} +
+

Schreibstil

+
+
+ Perspektive + {{ profile_analysis.writing_style.perspective or 'N/A' }} +
+
+ Ansprache + {{ profile_analysis.writing_style.form_of_address or 'N/A' }} +
+
+ Tonalität + {{ profile_analysis.writing_style.tone or 'N/A' }} +
+ {% if profile_analysis.writing_style.average_word_count %} +
+ Ø Wortanzahl + {{ profile_analysis.writing_style.average_word_count }} Wörter +
+ {% endif %} +
+
+ {% endif %} + + + {% if profile_analysis.linguistic_fingerprint %} +
+

Sprachlicher Fingerabdruck

+
+
+ Energie-Level +
+
+
+
+ {{ profile_analysis.linguistic_fingerprint.energy_level or 'N/A' }}/10 +
+
+ {% if profile_analysis.linguistic_fingerprint.formality_level %} +
+ Formalität +
+
+
+
+ {{ profile_analysis.linguistic_fingerprint.formality_level }}/10 +
+
+ {% endif %} +
+ + {% if profile_analysis.linguistic_fingerprint.signature_phrases %} +
+ Signature Phrases +
+ {% for phrase in profile_analysis.linguistic_fingerprint.signature_phrases %} + {{ phrase }} + {% endfor %} +
+
+ {% endif %} +
+ {% endif %} + + + {% if profile_analysis.phrase_library %} +
+

Phrasen-Bibliothek

+
+ {% if profile_analysis.phrase_library.hook_phrases %} +
+ Hook-Phrasen +
+ {% for hook in profile_analysis.phrase_library.hook_phrases[:4] %} +

"{{ hook }}"

+ {% endfor %} +
+
+ {% endif %} + + {% if profile_analysis.phrase_library.emotional_expressions %} +
+ Emotionale Ausdrücke +
+ {% for expr in profile_analysis.phrase_library.emotional_expressions[:6] %} + {{ expr }} + {% endfor %} +
+
+ {% endif %} + + {% if profile_analysis.phrase_library.cta_phrases %} +
+ CTA-Phrasen +
+ {% for cta in profile_analysis.phrase_library.cta_phrases[:3] %} +

"{{ cta }}"

+ {% endfor %} +
+
+ {% endif %} +
+
+ {% endif %} + + + {% if profile_analysis.tone_analysis %} +
+

Ton-Analyse

+
+ {% if profile_analysis.tone_analysis.primary_tone %} +
+ Primärer Ton + {{ profile_analysis.tone_analysis.primary_tone }} +
+ {% endif %} + {% if profile_analysis.tone_analysis.secondary_tones %} +
+ Sekundäre Töne + {{ profile_analysis.tone_analysis.secondary_tones | join(', ') }} +
+ {% endif %} +
+
+ {% endif %} + + + {% if profile_analysis.audience_insights %} +
+

Zielgruppen-Insights

+
+ {% if profile_analysis.audience_insights.industry_context %} +
+ Branche + {{ profile_analysis.audience_insights.industry_context }} +
+ {% endif %} + {% if profile_analysis.audience_insights.target_audience %} +
+ Zielgruppe + {{ profile_analysis.audience_insights.target_audience }} +
+ {% endif %} +
+ + {% if profile_analysis.audience_insights.pain_points_addressed %} +
+ Adressierte Pain Points +
+ {% for pain in profile_analysis.audience_insights.pain_points_addressed %} + {{ pain }} + {% endfor %} +
+
+ {% endif %} +
+ {% endif %} +
+
+ {% endif %} + + + {% if post_type_analysis %} +
+
+
+

+ + Post-Typ Analyse +

+ {% if post_type %} + + {{ post_type.name }} + + {% endif %} +
+ + {% if post_type_analysis.post_count %} +

Basierend auf {{ post_type_analysis.post_count }} analysierten Posts dieses Typs

+ {% endif %} + + + {% if post_type_analysis.structure_patterns %} +
+

Struktur-Muster

+
+ {% if post_type_analysis.structure_patterns.typical_structure %} +
+ Typische Struktur + {{ post_type_analysis.structure_patterns.typical_structure }} +
+ {% endif %} + {% if post_type_analysis.structure_patterns.paragraph_count %} +
+ Absätze + {{ post_type_analysis.structure_patterns.paragraph_count }} +
+ {% endif %} + {% if post_type_analysis.structure_patterns.paragraph_length %} +
+ Absatzlänge + {{ post_type_analysis.structure_patterns.paragraph_length }} +
+ {% endif %} + {% if post_type_analysis.structure_patterns.uses_lists is defined %} +
+ Listen + {{ 'Ja' if post_type_analysis.structure_patterns.uses_lists else 'Nein' }} +
+ {% endif %} + {% if post_type_analysis.structure_patterns.list_style %} +
+ Listen-Stil + {{ post_type_analysis.structure_patterns.list_style }} +
+ {% endif %} +
+ {% if post_type_analysis.structure_patterns.structure_template %} +
+ Struktur-Vorlage +
{{ post_type_analysis.structure_patterns.structure_template }}
+
+ {% endif %} +
+ {% endif %} + + + {% if post_type_analysis.language_style %} +
+

Sprachstil

+
+ {% if post_type_analysis.language_style.tone %} +
+ Tonalität + {{ post_type_analysis.language_style.tone }} +
+ {% endif %} + {% if post_type_analysis.language_style.perspective %} +
+ Perspektive + {{ post_type_analysis.language_style.perspective }} +
+ {% endif %} + {% if post_type_analysis.language_style.energy_level %} +
+ Energie-Level +
+
+
+
+ {{ post_type_analysis.language_style.energy_level }}/10 +
+
+ {% endif %} + {% if post_type_analysis.language_style.formality %} +
+ Formalität + {{ post_type_analysis.language_style.formality }} +
+ {% endif %} + {% if post_type_analysis.language_style.sentence_types %} +
+ Satz-Typen + {{ post_type_analysis.language_style.sentence_types }} +
+ {% endif %} +
+ + {% if post_type_analysis.language_style.typical_sentence_starters %} +
+ Typische Satzanfänge +
+ {% for starter in post_type_analysis.language_style.typical_sentence_starters[:8] %} + "{{ starter }}" + {% endfor %} +
+
+ {% endif %} + + {% if post_type_analysis.language_style.signature_phrases %} +
+ Signature Phrases +
+ {% for phrase in post_type_analysis.language_style.signature_phrases[:5] %} +

"{{ phrase }}"

+ {% endfor %} +
+
+ {% endif %} +
+ {% endif %} + + + {% if post_type_analysis.hooks %} +
+

Hook-Muster

+ {% if post_type_analysis.hooks.hook_types %} +
+ {% for hook_type in post_type_analysis.hooks.hook_types %} + {{ hook_type }} + {% endfor %} +
+ {% endif %} + + {% if post_type_analysis.hooks.real_examples %} +
+ {% for example in post_type_analysis.hooks.real_examples[:4] %} +
+
+ {{ example.type or 'Hook' }} + {% if example.why_effective %} + {{ example.why_effective }} + {% endif %} +
+

"{{ example.hook }}"

+
+ {% endfor %} +
+ {% endif %} + + {% if post_type_analysis.hooks.average_hook_length %} +
+ Durchschnittliche Hook-Länge: {{ post_type_analysis.hooks.average_hook_length }} +
+ {% endif %} +
+ {% endif %} + + + {% if post_type_analysis.ctas %} +
+

CTA-Muster

+ {% if post_type_analysis.ctas.cta_types %} +
+ {% for cta_type in post_type_analysis.ctas.cta_types %} + {{ cta_type }} + {% endfor %} +
+ {% endif %} + + {% if post_type_analysis.ctas.real_examples %} +
+ {% for example in post_type_analysis.ctas.real_examples[:4] %} +
+ {{ example.type or 'CTA' }} +

"{{ example.cta }}"

+
+ {% endfor %} +
+ {% endif %} + +
+ {% if post_type_analysis.ctas.cta_position %} +
+ Position + {{ post_type_analysis.ctas.cta_position }} +
+ {% endif %} + {% if post_type_analysis.ctas.cta_intensity %} +
+ Intensität + {{ post_type_analysis.ctas.cta_intensity }} +
+ {% endif %} +
+
+ {% endif %} + + + {% if post_type_analysis.visual_patterns %} +
+

Visuelle Elemente

+
+ {% if post_type_analysis.visual_patterns.emoji_usage %} +
+ Emoji-Häufigkeit + {{ post_type_analysis.visual_patterns.emoji_usage.frequency or 'mittel' }} +
+ {% if post_type_analysis.visual_patterns.emoji_usage.typical_emojis %} +
+ Typische Emojis + {{ post_type_analysis.visual_patterns.emoji_usage.typical_emojis | join(' ') }} +
+ {% endif %} + {% if post_type_analysis.visual_patterns.emoji_usage.placement %} +
+ Platzierung + {{ post_type_analysis.visual_patterns.emoji_usage.placement }} +
+ {% endif %} + {% endif %} + {% if post_type_analysis.visual_patterns.formatting %} +
+ Formatierung + {{ post_type_analysis.visual_patterns.formatting }} +
+ {% endif %} + {% if post_type_analysis.visual_patterns.whitespace %} +
+ Whitespace + {{ post_type_analysis.visual_patterns.whitespace }} +
+ {% endif %} +
+
+ {% endif %} + + + {% if post_type_analysis.length_patterns %} +
+

Längen-Muster

+
+ {% if post_type_analysis.length_patterns.ideal_length %} +
+ Ideale Länge + {{ post_type_analysis.length_patterns.ideal_length }} +
+ {% endif %} + {% if post_type_analysis.length_patterns.average_words %} +
+ Durchschnitt + {{ post_type_analysis.length_patterns.average_words }} Wörter +
+ {% endif %} + {% if post_type_analysis.length_patterns.range %} +
+ Range + {{ post_type_analysis.length_patterns.range }} Wörter +
+ {% endif %} +
+
+ {% endif %} + + + {% if post_type_analysis.content_focus %} +
+

Inhaltlicher Fokus

+ {% if post_type_analysis.content_focus.main_themes %} +
+ {% for theme in post_type_analysis.content_focus.main_themes %} + {{ theme }} + {% endfor %} +
+ {% endif %} +
+ {% if post_type_analysis.content_focus.value_proposition %} +
+ Mehrwert + {{ post_type_analysis.content_focus.value_proposition }} +
+ {% endif %} + {% if post_type_analysis.content_focus.target_emotion %} +
+ Ziel-Emotion + {{ post_type_analysis.content_focus.target_emotion }} +
+ {% endif %} +
+
+ {% endif %} + + + {% if post_type_analysis.recurring_elements %} +
+

Wiederkehrende Elemente

+ {% if post_type_analysis.recurring_elements.phrases %} +
+ Wiederkehrende Phrasen +
+ {% for phrase in post_type_analysis.recurring_elements.phrases[:8] %} + "{{ phrase }}" + {% endfor %} +
+
+ {% endif %} + {% if post_type_analysis.recurring_elements.transitions %} +
+ Typische Übergänge +
+ {% for transition in post_type_analysis.recurring_elements.transitions[:4] %} +

"{{ transition }}"

+ {% endfor %} +
+
+ {% endif %} + {% if post_type_analysis.recurring_elements.closings %} +
+ Schlussformulierungen +
+ {% for closing in post_type_analysis.recurring_elements.closings[:4] %} +

"{{ closing }}"

+ {% endfor %} +
+
+ {% endif %} +
+ {% endif %} + + + {% if post_type_analysis.writing_guidelines %} +
+

Schreib-Richtlinien

+
+ {% if post_type_analysis.writing_guidelines.dos %} +
+ DO +
    + {% for do in post_type_analysis.writing_guidelines.dos %} +
  • + + {{ do }} +
  • + {% endfor %} +
+
+ {% endif %} + {% if post_type_analysis.writing_guidelines.donts %} +
+ DON'T +
    + {% for dont in post_type_analysis.writing_guidelines.donts %} +
  • + + {{ dont }} +
  • + {% endfor %} +
+
+ {% endif %} +
+ {% if post_type_analysis.writing_guidelines.key_success_factors %} +
+ Erfolgsfaktoren +
    + {% for factor in post_type_analysis.writing_guidelines.key_success_factors %} +
  • + + {{ factor }} +
  • + {% endfor %} +
+
+ {% endif %} +
+ {% endif %} +
+
+ {% endif %} + + + +{% endblock %} + +{% block scripts %} + +{% endblock %} diff --git a/src/web/templates/posts.html b/src/web/templates/posts.html new file mode 100644 index 0000000..5047746 --- /dev/null +++ b/src/web/templates/posts.html @@ -0,0 +1,152 @@ +{% extends "base.html" %} +{% block title %}Alle Posts - LinkedIn Posts{% endblock %} + +{% block head %} + +{% endblock %} + +{% block content %} +
+
+

Alle Posts

+

{{ total_posts }} generierte Posts

+
+ + + Neuer Post + +
+ +{% if error %} +
+ Error: {{ error }} +
+{% endif %} + +{% if customers_with_posts %} +
+ {% for item in customers_with_posts %} + {% if item.posts %} + + {% endif %} + {% endfor %} +
+{% else %} +
+
+ +
+

Noch keine Posts

+

Erstelle deinen ersten LinkedIn Post mit KI-Unterstützung.

+ + + Post erstellen + +
+{% endif %} +{% endblock %} diff --git a/src/web/templates/research.html b/src/web/templates/research.html new file mode 100644 index 0000000..3a4206f --- /dev/null +++ b/src/web/templates/research.html @@ -0,0 +1,215 @@ +{% extends "base.html" %} +{% block title %}Research Topics - LinkedIn Posts{% endblock %} + +{% block content %} +
+

Research Topics

+

Recherchiere neue Content-Themen mit Perplexity AI

+
+ +
+ +
+
+
+ + +
+ + + + + + + + +
+ + {% if not customers %} +
+

Noch keine Kunden vorhanden. Erstelle zuerst einen Kunden.

+
+ {% endif %} +
+ + +
+
+

Gefundene Topics

+
+

Starte eine Recherche um Topics zu finden...

+
+
+
+
+{% endblock %} + +{% block scripts %} + +{% endblock %} diff --git a/src/web/templates/scraped_posts.html b/src/web/templates/scraped_posts.html new file mode 100644 index 0000000..9cba7cc --- /dev/null +++ b/src/web/templates/scraped_posts.html @@ -0,0 +1,571 @@ +{% extends "base.html" %} +{% block title %}Gescrapte Posts - LinkedIn Posts{% endblock %} + +{% block head %} + +{% endblock %} + +{% block content %} +
+
+
+

Gescrapte Posts verwalten

+

Posts manuell kategorisieren und Post-Typ-Analyse triggern

+
+
+
+ + +
+
+
+ + +
+
+ + +
+
+
+ + + + + + + + + + + + + +{% if not customers %} +
+

Noch keine Kunden vorhanden. Erstelle zuerst einen Kunden.

+
+{% endif %} + + + +{% endblock %} + +{% block scripts %} + +{% endblock %} diff --git a/src/web/templates/status.html b/src/web/templates/status.html new file mode 100644 index 0000000..0471d51 --- /dev/null +++ b/src/web/templates/status.html @@ -0,0 +1,155 @@ +{% extends "base.html" %} +{% block title %}Status - LinkedIn Posts{% endblock %} + +{% block content %} +
+

Status

+

Übersicht über alle Kunden und deren Setup-Status

+
+ +{% if error %} +
+ Error: {{ error }} +
+{% endif %} + +{% if customer_statuses %} +
+ {% for item in customer_statuses %} +
+ +
+
+
+
+ {% if item.profile_picture %} + {{ item.customer.name }} + {% else %} + {{ item.customer.name[0] | upper }} + {% endif %} +
+
+

{{ item.customer.name }}

+

{{ item.customer.company_name or 'Kein Unternehmen' }}

+
+
+
+ {% if item.status.ready_for_posts %} + + + Bereit für Posts + + {% else %} + + + Setup unvollständig + + {% endif %} +
+
+
+ + +
+
+ +
+
+ {% if item.status.has_scraped_posts %} + + {% else %} + + {% endif %} + Scraped Posts +
+

{{ item.status.scraped_posts_count }}

+
+ + +
+
+ {% if item.status.has_profile_analysis %} + + {% else %} + + {% endif %} + Profil Analyse +
+

{{ 'Vorhanden' if item.status.has_profile_analysis else 'Fehlt' }}

+
+ + +
+
+ {% if item.status.research_count > 0 %} + + {% else %} + + {% endif %} + Research Topics +
+

{{ item.status.research_count }}

+
+ + +
+
+ + Generierte Posts +
+

{{ item.status.posts_count }}

+
+
+ + + {% if item.status.missing_items %} +
+

+ + Fehlende Elemente +

+
    + {% for item_missing in item.status.missing_items %} +
  • + + {{ item_missing }} +
  • + {% endfor %} +
+
+ {% endif %} + + +
+ {% if not item.status.has_profile_analysis %} + + Setup wiederholen + + {% endif %} + {% if item.status.research_count == 0 %} + + Recherche starten + + {% endif %} + {% if item.status.ready_for_posts %} + + Post erstellen + + {% endif %} +
+
+
+ {% endfor %} +
+{% else %} +
+ +

Noch keine Kunden

+

Erstelle deinen ersten Kunden, um den Status zu sehen.

+ + + Neuer Kunde + +
+{% endif %} +{% endblock %} diff --git a/src/web/templates/user/auth_callback.html b/src/web/templates/user/auth_callback.html new file mode 100644 index 0000000..f51cdf8 --- /dev/null +++ b/src/web/templates/user/auth_callback.html @@ -0,0 +1,45 @@ + + + + + + Anmeldung... - LinkedIn Posts + + + + +
+
+

Anmeldung wird verarbeitet...

+

Du wirst gleich weitergeleitet.

+
+ + + + diff --git a/src/web/templates/user/base.html b/src/web/templates/user/base.html new file mode 100644 index 0000000..adfdd81 --- /dev/null +++ b/src/web/templates/user/base.html @@ -0,0 +1,113 @@ + + + + + + {% block title %}LinkedIn Posts{% endblock %} + + + + {% block head %}{% endblock %} + + + + + + +
+
+ {% block content %}{% endblock %} +
+
+ + {% block scripts %}{% endblock %} + + diff --git a/src/web/templates/user/create_post.html b/src/web/templates/user/create_post.html new file mode 100644 index 0000000..6165960 --- /dev/null +++ b/src/web/templates/user/create_post.html @@ -0,0 +1,479 @@ +{% extends "base.html" %} +{% block title %}Post erstellen - LinkedIn Posts{% endblock %} + +{% block content %} +
+

Post erstellen

+

Generiere einen neuen LinkedIn Post mit AI

+
+ +
+ +
+
+ + + + +
+ +
+

Lade Topics...

+
+
+ + +
+ +
+ + + +
+
+ + + + + +
+
+ + +
+
+

Generierter Post

+ + + + +
+

Wähle ein Topic aus oder gib ein eigenes ein, dann klicke auf "Post generieren"...

+
+
+
+
+{% endblock %} + +{% block scripts %} + +{% endblock %} diff --git a/src/web/templates/user/dashboard.html b/src/web/templates/user/dashboard.html new file mode 100644 index 0000000..f7a4b46 --- /dev/null +++ b/src/web/templates/user/dashboard.html @@ -0,0 +1,76 @@ +{% extends "base.html" %} +{% block title %}Dashboard - LinkedIn Posts{% endblock %} + +{% block content %} +
+

Dashboard

+

Willkommen zurück, {{ session.linkedin_name or session.customer_name }}!

+
+ +{% if error %} +
+ Error: {{ error }} +
+{% endif %} + + +
+
+
+
+ +
+
+

Generierte Posts

+

{{ total_posts or 0 }}

+
+
+
+ +
+
+
+ +
+
+

AI Agents

+

5

+
+
+
+
+ + + +{% endblock %} diff --git a/src/web/templates/user/login.html b/src/web/templates/user/login.html new file mode 100644 index 0000000..ac78fa4 --- /dev/null +++ b/src/web/templates/user/login.html @@ -0,0 +1,75 @@ + + + + + + Login - LinkedIn Posts + + + + + +
+
+
+ Logo +

LinkedIn Posts

+

AI Workflow System

+
+ + {% if error %} +
+ {% if error == 'access_denied' %} + Zugriff verweigert. Bitte versuche es erneut. + {% elif error == 'unauthorized' %} + Dein LinkedIn-Profil ist nicht autorisiert. + {% else %} + Fehler bei der Anmeldung: {{ error }} + {% endif %} +
+ {% endif %} + +
+ + + + + Mit LinkedIn anmelden + + +

+ Melde dich mit deinem LinkedIn-Konto an, um auf das Dashboard zuzugreifen. +

+
+
+ + +
+ + diff --git a/src/web/templates/user/not_authorized.html b/src/web/templates/user/not_authorized.html new file mode 100644 index 0000000..bd7ac11 --- /dev/null +++ b/src/web/templates/user/not_authorized.html @@ -0,0 +1,40 @@ + + + + + + Nicht autorisiert - LinkedIn Posts + + + + +
+
+
+ + + +
+ +

Nicht autorisiert

+ +

+ Dein LinkedIn-Profil ist nicht mit einem Kundenkonto verknüpft. + Bitte kontaktiere den Administrator, um Zugang zu erhalten. +

+ + +
+
+ + diff --git a/src/web/templates/user/post_detail.html b/src/web/templates/user/post_detail.html new file mode 100644 index 0000000..dbf66d6 --- /dev/null +++ b/src/web/templates/user/post_detail.html @@ -0,0 +1,698 @@ +{% extends "base.html" %} +{% block title %}{{ post.topic_title }} - Post Details{% endblock %} + +{% block head %} + +{% endblock %} + +{% block content %} + +
+ + + Zurück zu meinen Posts + +
+
+

+ {{ post.topic_title or 'Untitled Post' }} +

+
+ {{ post.created_at.strftime('%d.%m.%Y um %H:%M Uhr') if post.created_at else 'N/A' }}
+ |
+ {{ post.iterations }} Iteration{{ 'en' if post.iterations != 1 else '' }}
+
+
+ + {{ post.status | capitalize }} + + {% if final_feedback %} + + Score: {{ final_feedback.overall_score }}/100 + + {% endif %} +
+
+
+ + +
+ +
+ + +
+
+ +
+
+
+

+ + Finaler Post +

+
+ +
+ + +
+ +
+
+ + +
+
+
+ {% if profile_picture_url %} + {{ session.linkedin_name }} + {% else %} + {{ session.linkedin_name[:2] | upper if session.linkedin_name else 'UN' }} + {% endif %} +
+ +
+ + + +
+
+ +
...mehr anzeigen
+
+ + + + + + + 42 + 12 Kommentare • 3 Reposts +
+
+ + + + +
+
+ + + +
+
+ + +
+ + {% if final_feedback and final_feedback.scores %} +
+

+ + Score-Aufschlüsselung +

+
+
+
+ Authentizität & Stil + {{ final_feedback.scores.authenticity_and_style }}/40 +
+
+
+
+
+
+
+ Content-Qualität + {{ final_feedback.scores.content_quality }}/35 +
+
+
+
+
+
+
+ Technische Umsetzung + {{ final_feedback.scores.technical_execution }}/25 +
+
+
+
+
+
+
+ Gesamt + {{ final_feedback.overall_score }}/100 +
+
+
+
+ {% endif %} + + + {% if final_feedback %} +
+

+ + Finales Feedback +

+

{{ final_feedback.feedback }}

+ {% if final_feedback.strengths %} +
+ Stärken +
    + {% for s in final_feedback.strengths %} +
  • + {{ s }}
  • + {% endfor %} +
+
+ {% endif %} +
+ {% endif %} + + +
+

Aktionen

+
+ + + + Neuen Post erstellen + +
+
+
+
+
+ + +{% if post.writer_versions and post.writer_versions | length > 0 %} +
+
+
+

+ + Iterationen +

+ +
+ + + 1 / {{ post.writer_versions | length }} + + +
+
+ + + {% for i in range(post.writer_versions | length) %} +
+
+ Version {{ i + 1 }} + {% if post.critic_feedback and i < post.critic_feedback | length %} +
+ + Score: {{ post.critic_feedback[i].overall_score }}/100 + + {% if post.critic_feedback[i].approved %} + Approved + {% endif %} +
+ {% endif %} +
+ +
+
{{ post.writer_versions[i] }}
+
+ + {% if post.critic_feedback and i < post.critic_feedback | length %} + {% set fb = post.critic_feedback[i] %} +
+

+ + Critic Feedback +

+

{{ fb.feedback }}

+
+ {% if fb.strengths %} +
+ Stärken +
    + {% for s in fb.strengths %} +
  • + {{ s }}
  • + {% endfor %} +
+
+ {% endif %} + {% if fb.improvements %} +
+ Verbesserungen +
    + {% for imp in fb.improvements %} +
  • - {{ imp }}
  • + {% endfor %} +
+
+ {% endif %} +
+
+ {% endif %} +
+ {% endfor %} +
+
+{% endif %} +{% endblock %} + +{% block scripts %} + +{% endblock %} diff --git a/src/web/templates/user/posts.html b/src/web/templates/user/posts.html new file mode 100644 index 0000000..f26f0a1 --- /dev/null +++ b/src/web/templates/user/posts.html @@ -0,0 +1,114 @@ +{% extends "base.html" %} +{% block title %}Meine Posts - LinkedIn Posts{% endblock %} + +{% block head %} + +{% endblock %} + +{% block content %} +
+
+

Meine Posts

+

{{ total_posts }} generierte Posts

+
+ + + Neuer Post + +
+ +{% if error %} +
+ Error: {{ error }} +
+{% endif %} + +{% if posts %} + +{% else %} +
+
+ +
+

Noch keine Posts

+

Erstelle deinen ersten LinkedIn Post mit KI-Unterstützung.

+ + + Post erstellen + +
+{% endif %} +{% endblock %} diff --git a/src/web/templates/user/research.html b/src/web/templates/user/research.html new file mode 100644 index 0000000..da720ad --- /dev/null +++ b/src/web/templates/user/research.html @@ -0,0 +1,185 @@ +{% extends "base.html" %} +{% block title %}Research Topics - LinkedIn Posts{% endblock %} + +{% block content %} +
+

Research Topics

+

Recherchiere neue Content-Themen mit Perplexity AI

+
+ +
+ +
+
+ +
+ +
+
Lade Post-Typen...
+
+

Wähle einen Post-Typ für gezielte Recherche oder lasse leer für allgemeine Recherche.

+ +
+ + + + + +
+
+ + +
+
+

Gefundene Topics

+
+

Starte eine Recherche um Topics zu finden...

+
+
+
+
+{% endblock %} + +{% block scripts %} + +{% endblock %} diff --git a/src/web/templates/user/status.html b/src/web/templates/user/status.html new file mode 100644 index 0000000..d4b5298 --- /dev/null +++ b/src/web/templates/user/status.html @@ -0,0 +1,142 @@ +{% extends "base.html" %} +{% block title %}Status - LinkedIn Posts{% endblock %} + +{% block content %} +
+

Status

+

Übersicht über deinen Setup-Status

+
+ +{% if error %} +
+ Error: {{ error }} +
+{% endif %} + +{% if status %} +
+ +
+
+
+
+ {% if profile_picture %} + {{ customer.name }} + {% else %} + {{ customer.name[0] | upper }} + {% endif %} +
+
+

{{ customer.name }}

+

{{ customer.company_name or 'Kein Unternehmen' }}

+
+
+
+ {% if status.ready_for_posts %} + + + Bereit für Posts + + {% else %} + + + Setup unvollständig + + {% endif %} +
+
+
+ + +
+
+ +
+
+ {% if status.has_scraped_posts %} + + {% else %} + + {% endif %} + Scraped Posts +
+

{{ status.scraped_posts_count }}

+
+ + +
+
+ {% if status.has_profile_analysis %} + + {% else %} + + {% endif %} + Profil Analyse +
+

{{ 'Vorhanden' if status.has_profile_analysis else 'Fehlt' }}

+
+ + +
+
+ {% if status.research_count > 0 %} + + {% else %} + + {% endif %} + Research Topics +
+

{{ status.research_count }}

+
+ + +
+
+ + Generierte Posts +
+

{{ status.posts_count }}

+
+
+ + + {% if status.missing_items %} +
+

+ + Fehlende Elemente +

+
    + {% for item in status.missing_items %} +
  • + + {{ item }} +
  • + {% endfor %} +
+
+ {% endif %} + + +
+ {% if status.research_count == 0 %} + + Recherche starten + + {% endif %} + {% if status.ready_for_posts %} + + Post erstellen + + {% endif %} +
+
+
+{% else %} +
+ +

Status nicht verfügbar

+

Es konnte kein Status geladen werden.

+
+{% endif %} +{% endblock %} diff --git a/src/web/user/__init__.py b/src/web/user/__init__.py new file mode 100644 index 0000000..1edf025 --- /dev/null +++ b/src/web/user/__init__.py @@ -0,0 +1,4 @@ +"""User frontend module.""" +from src.web.user.routes import user_router + +__all__ = ["user_router"] diff --git a/src/web/user/auth.py b/src/web/user/auth.py new file mode 100644 index 0000000..30e1ea3 --- /dev/null +++ b/src/web/user/auth.py @@ -0,0 +1,348 @@ +"""User authentication with Supabase LinkedIn OAuth.""" +import re +import secrets +from typing import Optional +from uuid import UUID + +from fastapi import Request, Response +from loguru import logger + +from src.config import settings +from src.database import db + +# Session management +USER_SESSION_COOKIE = "linkedin_user_session" +SESSION_SECRET = settings.session_secret or secrets.token_hex(32) + + +def normalize_linkedin_url(url: str) -> str: + """Normalize LinkedIn URL for comparison. + + Extracts the username/vanityName from various LinkedIn URL formats. + """ + if not url: + return "" + # Match linkedin.com/in/username with optional trailing slash or query params + match = re.search(r'linkedin\.com/in/([^/?]+)', url.lower()) + if match: + return match.group(1).rstrip('/') + return url.lower().strip() + + +async def get_customer_by_vanity_name(vanity_name: str) -> Optional[dict]: + """Find customer by LinkedIn vanityName. + + Constructs the LinkedIn URL from vanityName and matches against + Customer.linkedin_url (normalized). + """ + if not vanity_name: + return None + + normalized_vanity = normalize_linkedin_url(f"https://www.linkedin.com/in/{vanity_name}/") + + # Get all customers and match + customers = await db.list_customers() + for customer in customers: + customer_vanity = normalize_linkedin_url(customer.linkedin_url) + if customer_vanity == normalized_vanity: + return { + "id": str(customer.id), + "name": customer.name, + "linkedin_url": customer.linkedin_url, + "company_name": customer.company_name, + "email": customer.email + } + + return None + + +async def get_customer_by_email(email: str) -> Optional[dict]: + """Find customer by email address. + + Fallback matching when LinkedIn vanityName is not available. + """ + if not email: + return None + + email_lower = email.lower().strip() + + # Get all customers and match by email + customers = await db.list_customers() + for customer in customers: + if customer.email and customer.email.lower().strip() == email_lower: + return { + "id": str(customer.id), + "name": customer.name, + "linkedin_url": customer.linkedin_url, + "company_name": customer.company_name, + "email": customer.email + } + + return None + + +async def get_customer_by_name(name: str) -> Optional[dict]: + """Find customer by name. + + Fallback matching when email is not available. + Tries exact match first, then case-insensitive. 
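+
+    Illustrative usage (a sketch only; "Jane Doe" is a placeholder name, not a
+    customer that exists in this project)::
+
+        customer = await get_customer_by_name("Jane Doe")
+        if customer:
+            # the helper returns a plain dict built from the matched Customer record
+            print(customer["id"], customer["name"], customer["email"])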
+ """ + if not name: + return None + + name_lower = name.lower().strip() + + # Get all customers and match by name + customers = await db.list_customers() + + # First try exact match + for customer in customers: + if customer.name == name: + return { + "id": str(customer.id), + "name": customer.name, + "linkedin_url": customer.linkedin_url, + "company_name": customer.company_name, + "email": customer.email + } + + # Then try case-insensitive + for customer in customers: + if customer.name.lower().strip() == name_lower: + return { + "id": str(customer.id), + "name": customer.name, + "linkedin_url": customer.linkedin_url, + "company_name": customer.company_name, + "email": customer.email + } + + return None + + +class UserSession: + """User session data.""" + + def __init__( + self, + customer_id: str, + customer_name: str, + linkedin_vanity_name: str, + linkedin_name: Optional[str] = None, + linkedin_picture: Optional[str] = None, + email: Optional[str] = None + ): + self.customer_id = customer_id + self.customer_name = customer_name + self.linkedin_vanity_name = linkedin_vanity_name + self.linkedin_name = linkedin_name + self.linkedin_picture = linkedin_picture + self.email = email + + def to_cookie_value(self) -> str: + """Serialize session to cookie value.""" + import json + import hashlib + + data = { + "customer_id": self.customer_id, + "customer_name": self.customer_name, + "linkedin_vanity_name": self.linkedin_vanity_name, + "linkedin_name": self.linkedin_name, + "linkedin_picture": self.linkedin_picture, + "email": self.email + } + + # Create signed cookie value + json_data = json.dumps(data) + signature = hashlib.sha256(f"{json_data}{SESSION_SECRET}".encode()).hexdigest()[:16] + + import base64 + encoded = base64.b64encode(json_data.encode()).decode() + return f"{encoded}.{signature}" + + @classmethod + def from_cookie_value(cls, cookie_value: str) -> Optional["UserSession"]: + """Deserialize session from cookie value.""" + import json + import hashlib + import base64 + + try: + parts = cookie_value.split(".") + if len(parts) != 2: + return None + + encoded, signature = parts + json_data = base64.b64decode(encoded.encode()).decode() + + # Verify signature + expected_sig = hashlib.sha256(f"{json_data}{SESSION_SECRET}".encode()).hexdigest()[:16] + if signature != expected_sig: + logger.warning("Invalid session signature") + return None + + data = json.loads(json_data) + return cls( + customer_id=data["customer_id"], + customer_name=data["customer_name"], + linkedin_vanity_name=data["linkedin_vanity_name"], + linkedin_name=data.get("linkedin_name"), + linkedin_picture=data.get("linkedin_picture"), + email=data.get("email") + ) + except Exception as e: + logger.error(f"Failed to parse session cookie: {e}") + return None + + +def get_user_session(request: Request) -> Optional[UserSession]: + """Get user session from request cookies.""" + cookie = request.cookies.get(USER_SESSION_COOKIE) + if not cookie: + return None + return UserSession.from_cookie_value(cookie) + + +def set_user_session(response: Response, session: UserSession) -> None: + """Set user session cookie.""" + response.set_cookie( + key=USER_SESSION_COOKIE, + value=session.to_cookie_value(), + httponly=True, + max_age=60 * 60 * 24 * 7, # 7 days + samesite="lax" + ) + + +def clear_user_session(response: Response) -> None: + """Clear user session cookie.""" + response.delete_cookie(USER_SESSION_COOKIE) + + +async def handle_oauth_callback( + access_token: str, + refresh_token: Optional[str] = None +) -> Optional[UserSession]: 
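+    # Flow sketch (comments only, illustrative): the /auth/linkedin route redirects to
+    # the Supabase "linkedin_oidc" authorize URL, Supabase then calls /auth/callback with
+    # an access token, and this coroutine resolves that token to a customer roughly as:
+    #
+    #     customer = await get_customer_by_vanity_name(vanity_name)   # 1st: vanityName
+    #     customer = customer or await get_customer_by_email(email)   # 2nd: e-mail
+    #     customer = customer or await get_customer_by_name(name)     # 3rd: display name
+    #
+    # A UserSession is created only when one of these lookups succeeds.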
+ """Handle OAuth callback from Supabase. + + 1. Get user info from Supabase using access token + 2. Extract LinkedIn vanityName from user metadata + 3. Match with Customer record + 4. Create session if match found + + Returns UserSession if authorized, None if not. + """ + from supabase import create_client + + try: + # Create a new client with the user's access token + supabase = create_client(settings.supabase_url, settings.supabase_key) + + # Get user info using the access token + user_response = supabase.auth.get_user(access_token) + + if not user_response or not user_response.user: + logger.error("Failed to get user from Supabase") + return None + + user = user_response.user + user_metadata = user.user_metadata or {} + + # Debug: Log full response + import json + logger.info(f"=== FULL OAUTH RESPONSE ===") + logger.info(f"user.id: {user.id}") + logger.info(f"user.email: {user.email}") + logger.info(f"user.phone: {user.phone}") + logger.info(f"user.app_metadata: {json.dumps(user.app_metadata, indent=2)}") + logger.info(f"user.user_metadata: {json.dumps(user.user_metadata, indent=2)}") + logger.info(f"--- Einzelne Felder ---") + logger.info(f"given_name: {user_metadata.get('given_name')}") + logger.info(f"family_name: {user_metadata.get('family_name')}") + logger.info(f"name: {user_metadata.get('name')}") + logger.info(f"email (metadata): {user_metadata.get('email')}") + logger.info(f"picture: {user_metadata.get('picture')}") + logger.info(f"sub: {user_metadata.get('sub')}") + logger.info(f"provider_id: {user_metadata.get('provider_id')}") + logger.info(f"=== END OAUTH RESPONSE ===") + + # LinkedIn OIDC provides these fields + vanity_name = user_metadata.get("vanityName") # LinkedIn username (often not provided) + name = user_metadata.get("name") + picture = user_metadata.get("picture") + email = user.email + + logger.info(f"OAuth callback for user: {name} (vanityName={vanity_name}, email={email})") + + # Try to match with customer + customer = None + + # First try vanityName if available + if vanity_name: + customer = await get_customer_by_vanity_name(vanity_name) + if customer: + logger.info(f"Matched by vanityName: {vanity_name}") + + # Fallback to email matching + if not customer and email: + customer = await get_customer_by_email(email) + if customer: + logger.info(f"Matched by email: {email}") + + # Fallback to name matching + if not customer and name: + customer = await get_customer_by_name(name) + if customer: + logger.info(f"Matched by name: {name}") + + if not customer: + # Debug: List all customers to help diagnose + all_customers = await db.list_customers() + logger.warning(f"No customer found for LinkedIn user: {name} (email={email}, vanityName={vanity_name})") + logger.warning(f"Available customers:") + for c in all_customers: + logger.warning(f" - {c.name}: email={c.email}, linkedin={c.linkedin_url}") + return None + + logger.info(f"User {name} matched with customer {customer['name']}") + + # Use vanityName from OAuth or extract from customer's linkedin_url + effective_vanity_name = vanity_name + if not effective_vanity_name and customer.get("linkedin_url"): + effective_vanity_name = normalize_linkedin_url(customer["linkedin_url"]) + + return UserSession( + customer_id=customer["id"], + customer_name=customer["name"], + linkedin_vanity_name=effective_vanity_name or "", + linkedin_name=name, + linkedin_picture=picture, + email=email + ) + + except Exception as e: + logger.exception(f"OAuth callback error: {e}") + return None + + +def get_supabase_login_url(redirect_to: 
str) -> str: + """Generate Supabase OAuth login URL for LinkedIn. + + Args: + redirect_to: The URL to redirect to after OAuth (the callback endpoint) + + Returns: + The Supabase OAuth URL to redirect the user to + """ + from urllib.parse import urlencode + + # Supabase OAuth endpoint + base_url = f"{settings.supabase_url}/auth/v1/authorize" + + params = { + "provider": "linkedin_oidc", + "redirect_to": redirect_to + } + + return f"{base_url}?{urlencode(params)}" diff --git a/src/web/user/routes.py b/src/web/user/routes.py new file mode 100644 index 0000000..8635925 --- /dev/null +++ b/src/web/user/routes.py @@ -0,0 +1,464 @@ +"""User frontend routes (LinkedIn OAuth protected).""" +import asyncio +import json +from pathlib import Path +from typing import Optional +from uuid import UUID + +from fastapi import APIRouter, Request, Form, BackgroundTasks, HTTPException +from fastapi.templating import Jinja2Templates +from fastapi.responses import HTMLResponse, RedirectResponse +from pydantic import BaseModel +from loguru import logger + +from src.config import settings +from src.database import db +from src.orchestrator import orchestrator +from src.web.user.auth import ( + get_user_session, set_user_session, clear_user_session, + get_supabase_login_url, handle_oauth_callback, UserSession +) + +# Router for user frontend +user_router = APIRouter(tags=["user"]) + +# Templates +templates = Jinja2Templates(directory=Path(__file__).parent.parent / "templates" / "user") + +# Store for progress updates +progress_store = {} + + +async def get_customer_profile_picture(customer_id: UUID) -> Optional[str]: + """Get profile picture URL from customer's LinkedIn posts.""" + linkedin_posts = await db.get_linkedin_posts(customer_id) + for lp in linkedin_posts: + if lp.raw_data and isinstance(lp.raw_data, dict): + author = lp.raw_data.get("author", {}) + if author and isinstance(author, dict): + profile_picture_url = author.get("profile_picture") + if profile_picture_url: + return profile_picture_url + return None + + +def require_user_session(request: Request) -> Optional[UserSession]: + """Check if user is authenticated, redirect to login if not.""" + session = get_user_session(request) + if not session: + return None + return session + + +# ==================== AUTH ROUTES ==================== + +@user_router.get("/login", response_class=HTMLResponse) +async def login_page(request: Request, error: str = None): + """User login page with LinkedIn OAuth button.""" + # If already logged in, redirect to dashboard + session = get_user_session(request) + if session: + return RedirectResponse(url="/", status_code=302) + + return templates.TemplateResponse("login.html", { + "request": request, + "error": error + }) + + +@user_router.get("/auth/linkedin") +async def start_oauth(request: Request): + """Start LinkedIn OAuth flow via Supabase.""" + # Build callback URL + callback_url = settings.supabase_redirect_url + if not callback_url: + # Fallback to constructing from request + callback_url = str(request.url_for("oauth_callback")) + + login_url = get_supabase_login_url(callback_url) + return RedirectResponse(url=login_url, status_code=302) + + +@user_router.get("/auth/callback") +async def oauth_callback( + request: Request, + access_token: str = None, + refresh_token: str = None, + error: str = None, + error_description: str = None +): + """Handle OAuth callback from Supabase.""" + if error: + logger.error(f"OAuth error: {error} - {error_description}") + return RedirectResponse(url=f"/login?error={error}", 
status_code=302) + + # Supabase returns tokens in URL hash, not query params + # We need to handle this client-side and redirect back + # Check if we have the tokens + if not access_token: + # Render a page that extracts hash params and redirects + return templates.TemplateResponse("auth_callback.html", { + "request": request + }) + + # We have the tokens, try to authenticate + session = await handle_oauth_callback(access_token, refresh_token) + + if not session: + return RedirectResponse(url="/not-authorized", status_code=302) + + # Success - set session and redirect to dashboard + response = RedirectResponse(url="/", status_code=302) + set_user_session(response, session) + return response + + +@user_router.get("/logout") +async def logout(request: Request): + """Log out user.""" + response = RedirectResponse(url="/login", status_code=302) + clear_user_session(response) + return response + + +@user_router.get("/not-authorized", response_class=HTMLResponse) +async def not_authorized_page(request: Request): + """Page shown when user's LinkedIn profile doesn't match any customer.""" + return templates.TemplateResponse("not_authorized.html", { + "request": request + }) + + +# ==================== PROTECTED PAGES ==================== + +@user_router.get("/", response_class=HTMLResponse) +async def dashboard(request: Request): + """User dashboard - shows only their own stats.""" + session = require_user_session(request) + if not session: + return RedirectResponse(url="/login", status_code=302) + + try: + customer_id = UUID(session.customer_id) + customer = await db.get_customer(customer_id) + posts = await db.get_generated_posts(customer_id) + profile_picture = session.linkedin_picture or await get_customer_profile_picture(customer_id) + + return templates.TemplateResponse("dashboard.html", { + "request": request, + "page": "home", + "session": session, + "customer": customer, + "total_posts": len(posts), + "profile_picture": profile_picture + }) + except Exception as e: + logger.error(f"Error loading dashboard: {e}") + return templates.TemplateResponse("dashboard.html", { + "request": request, + "page": "home", + "session": session, + "error": str(e) + }) + + +@user_router.get("/posts", response_class=HTMLResponse) +async def posts_page(request: Request): + """View user's own posts.""" + session = require_user_session(request) + if not session: + return RedirectResponse(url="/login", status_code=302) + + try: + customer_id = UUID(session.customer_id) + customer = await db.get_customer(customer_id) + posts = await db.get_generated_posts(customer_id) + profile_picture = session.linkedin_picture or await get_customer_profile_picture(customer_id) + + return templates.TemplateResponse("posts.html", { + "request": request, + "page": "posts", + "session": session, + "customer": customer, + "posts": posts, + "total_posts": len(posts), + "profile_picture": profile_picture + }) + except Exception as e: + logger.error(f"Error loading posts: {e}") + return templates.TemplateResponse("posts.html", { + "request": request, + "page": "posts", + "session": session, + "posts": [], + "total_posts": 0, + "error": str(e) + }) + + +@user_router.get("/posts/{post_id}", response_class=HTMLResponse) +async def post_detail_page(request: Request, post_id: str): + """Detailed view of a single post.""" + session = require_user_session(request) + if not session: + return RedirectResponse(url="/login", status_code=302) + + try: + post = await db.get_generated_post(UUID(post_id)) + if not post: + return 
RedirectResponse(url="/posts", status_code=302) + + # Verify user owns this post + if str(post.customer_id) != session.customer_id: + return RedirectResponse(url="/posts", status_code=302) + + customer = await db.get_customer(post.customer_id) + linkedin_posts = await db.get_linkedin_posts(post.customer_id) + reference_posts = [p.post_text for p in linkedin_posts if p.post_text and len(p.post_text) > 100][:10] + + profile_picture_url = session.linkedin_picture + if not profile_picture_url: + for lp in linkedin_posts: + if lp.raw_data and isinstance(lp.raw_data, dict): + author = lp.raw_data.get("author", {}) + if author and isinstance(author, dict): + profile_picture_url = author.get("profile_picture") + if profile_picture_url: + break + + profile_analysis_record = await db.get_profile_analysis(post.customer_id) + profile_analysis = profile_analysis_record.full_analysis if profile_analysis_record else None + + post_type = None + post_type_analysis = None + if post.post_type_id: + post_type = await db.get_post_type(post.post_type_id) + if post_type and post_type.analysis: + post_type_analysis = post_type.analysis + + final_feedback = None + if post.critic_feedback and len(post.critic_feedback) > 0: + final_feedback = post.critic_feedback[-1] + + return templates.TemplateResponse("post_detail.html", { + "request": request, + "page": "posts", + "session": session, + "post": post, + "customer": customer, + "reference_posts": reference_posts, + "profile_analysis": profile_analysis, + "post_type": post_type, + "post_type_analysis": post_type_analysis, + "final_feedback": final_feedback, + "profile_picture_url": profile_picture_url + }) + except Exception as e: + logger.error(f"Error loading post detail: {e}") + return RedirectResponse(url="/posts", status_code=302) + + +@user_router.get("/research", response_class=HTMLResponse) +async def research_page(request: Request): + """Research topics page - no customer dropdown needed.""" + session = require_user_session(request) + if not session: + return RedirectResponse(url="/login", status_code=302) + + return templates.TemplateResponse("research.html", { + "request": request, + "page": "research", + "session": session, + "customer_id": session.customer_id + }) + + +@user_router.get("/create", response_class=HTMLResponse) +async def create_post_page(request: Request): + """Create post page - no customer dropdown needed.""" + session = require_user_session(request) + if not session: + return RedirectResponse(url="/login", status_code=302) + + return templates.TemplateResponse("create_post.html", { + "request": request, + "page": "create", + "session": session, + "customer_id": session.customer_id + }) + + +@user_router.get("/status", response_class=HTMLResponse) +async def status_page(request: Request): + """User's status page.""" + session = require_user_session(request) + if not session: + return RedirectResponse(url="/login", status_code=302) + + try: + customer_id = UUID(session.customer_id) + customer = await db.get_customer(customer_id) + status = await orchestrator.get_customer_status(customer_id) + profile_picture = session.linkedin_picture or await get_customer_profile_picture(customer_id) + + return templates.TemplateResponse("status.html", { + "request": request, + "page": "status", + "session": session, + "customer": customer, + "status": status, + "profile_picture": profile_picture + }) + except Exception as e: + logger.error(f"Error loading status: {e}") + return templates.TemplateResponse("status.html", { + "request": request, + "page": 
"status", + "session": session, + "error": str(e) + }) + + +# ==================== API ENDPOINTS ==================== + +@user_router.get("/api/post-types") +async def get_post_types(request: Request): + """Get post types for the logged-in user's customer.""" + session = require_user_session(request) + if not session: + raise HTTPException(status_code=401, detail="Not authenticated") + + try: + post_types = await db.get_post_types(UUID(session.customer_id)) + return { + "post_types": [ + { + "id": str(pt.id), + "name": pt.name, + "description": pt.description, + "has_analysis": pt.analysis is not None, + "analyzed_post_count": pt.analyzed_post_count, + } + for pt in post_types + ] + } + except Exception as e: + logger.error(f"Error loading post types: {e}") + return {"post_types": [], "error": str(e)} + + +@user_router.get("/api/topics") +async def get_topics(request: Request, post_type_id: str = None): + """Get research topics for the logged-in user.""" + session = require_user_session(request) + if not session: + raise HTTPException(status_code=401, detail="Not authenticated") + + try: + customer_id = UUID(session.customer_id) + if post_type_id: + all_research = await db.get_all_research(customer_id, UUID(post_type_id)) + else: + all_research = await db.get_all_research(customer_id) + + # Get used topics + generated_posts = await db.get_generated_posts(customer_id) + used_topic_titles = set() + for post in generated_posts: + if post.topic_title: + used_topic_titles.add(post.topic_title.lower().strip()) + + all_topics = [] + for research in all_research: + if research.suggested_topics: + for topic in research.suggested_topics: + topic_title = topic.get("title", "").lower().strip() + if topic_title in used_topic_titles: + continue + topic["research_id"] = str(research.id) + topic["target_post_type_id"] = str(research.target_post_type_id) if research.target_post_type_id else None + all_topics.append(topic) + + return {"topics": all_topics, "used_count": len(used_topic_titles), "available_count": len(all_topics)} + except Exception as e: + logger.error(f"Error loading topics: {e}") + return {"topics": [], "error": str(e)} + + +@user_router.get("/api/tasks/{task_id}") +async def get_task_status(task_id: str): + """Get task progress.""" + return progress_store.get(task_id, {"status": "unknown", "message": "Task not found"}) + + +@user_router.post("/api/research") +async def start_research(request: Request, background_tasks: BackgroundTasks, post_type_id: str = Form(None)): + """Start research for the logged-in user.""" + session = require_user_session(request) + if not session: + raise HTTPException(status_code=401, detail="Not authenticated") + + customer_id = session.customer_id + task_id = f"research_{customer_id}_{asyncio.get_event_loop().time()}" + progress_store[task_id] = {"status": "starting", "message": "Starte Recherche...", "progress": 0} + + async def run_research(): + try: + def progress_callback(message: str, step: int, total: int): + progress_store[task_id] = {"status": "running", "message": message, "progress": int((step / total) * 100)} + + topics = await orchestrator.research_new_topics( + UUID(customer_id), + progress_callback=progress_callback, + post_type_id=UUID(post_type_id) if post_type_id else None + ) + progress_store[task_id] = {"status": "completed", "message": f"{len(topics)} Topics gefunden!", "progress": 100, "topics": topics} + except Exception as e: + logger.exception(f"Research failed: {e}") + progress_store[task_id] = {"status": "error", "message": str(e), 
"progress": 0} + + background_tasks.add_task(run_research) + return {"task_id": task_id} + + +@user_router.post("/api/posts") +async def create_post(request: Request, background_tasks: BackgroundTasks, topic_json: str = Form(...), post_type_id: str = Form(None)): + """Create a new post for the logged-in user.""" + session = require_user_session(request) + if not session: + raise HTTPException(status_code=401, detail="Not authenticated") + + customer_id = session.customer_id + task_id = f"post_{customer_id}_{asyncio.get_event_loop().time()}" + progress_store[task_id] = {"status": "starting", "message": "Starte Post-Erstellung...", "progress": 0} + topic = json.loads(topic_json) + + async def run_create_post(): + try: + def progress_callback(message: str, iteration: int, max_iterations: int, score: int = None, versions: list = None, feedback_list: list = None): + progress = int((iteration / max_iterations) * 100) if iteration > 0 else 5 + score_text = f" (Score: {score}/100)" if score else "" + progress_store[task_id] = { + "status": "running", "message": f"{message}{score_text}", "progress": progress, + "iteration": iteration, "max_iterations": max_iterations, + "versions": versions or [], "feedback_list": feedback_list or [] + } + + result = await orchestrator.create_post( + customer_id=UUID(customer_id), topic=topic, max_iterations=3, + progress_callback=progress_callback, + post_type_id=UUID(post_type_id) if post_type_id else None + ) + progress_store[task_id] = { + "status": "completed", "message": "Post erstellt!", "progress": 100, + "result": { + "post_id": str(result["post_id"]), "final_post": result["final_post"], + "iterations": result["iterations"], "final_score": result["final_score"], "approved": result["approved"] + } + } + except Exception as e: + logger.exception(f"Post creation failed: {e}") + progress_store[task_id] = {"status": "error", "message": str(e), "progress": 0} + + background_tasks.add_task(run_create_post) + return {"task_id": task_id} diff --git a/workflow_now.json b/workflow_now.json new file mode 100644 index 0000000..c251c8a --- /dev/null +++ b/workflow_now.json @@ -0,0 +1,638 @@ +{ + "nodes": [ + { + "parameters": {}, + "type": "n8n-nodes-base.manualTrigger", + "typeVersion": 1, + "position": [ + 112, + -336 + ], + "id": "0c76dcc5-d8c4-4060-8743-2bef798ee566", + "name": "When clicking ‘Execute workflow’" + }, + { + "parameters": { + "messages": { + "message": [ + { + "content": "=Du bist ein hochspezialisierter Trend-Analyst für LinkedIn. Deine Mission ist es, die \"Content-Lücke\" für eine spezifische Personal Brand oder Unternehmen zu schließen, indem du aktuelle, hochrelevante Aufhänger im Internet findest.", + "role": "system" + }, + { + "content": "=3. Suchauftrag & Strategie: Scanne das Web (News, Fachmagazine, soziale Diskussionen) nach Ereignissen der letzten 7 Tage. Suche nach drei unterschiedlichen Kategorien von Aufhängern:\n\nDer News-Flash: Eine aktuelle Nachricht oder Studie aus der Branche {{ $json.res_industry }}.\n\nDer Schmerzpunkt-Löser: Ein aktuelles Problem oder eine Diskussion, die {{ $json.res_audience }} gerade beschäftigt, und für die es eine Lösung gibt.\n\nDer konträre Trend: Eine Entwicklung, die gegen die herkömmliche Meinung in der Branche verstößt (perfekt für einen starken LinkedIn-Hook).\n\n4. Output-Format (Strikt pro Vorschlag): Gib exakt 3 Vorschläge aus. 
Nutze für jeden Vorschlag dieses Schema:\n\n[TITEL]: Ein prägnanter Arbeitstitel.\n\n[DER FAKT]: Eine detaillierte Zusammenfassung der News/des Trends (Daten, Fakten, Zitate).\n\n[WARUM RELEVANT]: Erkläre, warum genau dieser Fakt für die Zielgruppe {{ $json.res_audience }} einen massiven Nutzwert hat." + } + ] + }, + "options": {}, + "requestOptions": {} + }, + "type": "n8n-nodes-base.perplexity", + "typeVersion": 1, + "position": [ + 1024, + -336 + ], + "id": "551f6202-b4dc-4dec-a54c-971cba1f191b", + "name": "Themen-Recherche", + "credentials": { + "perplexityApi": { + "id": "w6QWYTblD1uvXaEE", + "name": "Perplexity account" + } + } + }, + { + "parameters": { + "modelId": { + "__rl": true, + "value": "gpt-4o", + "mode": "list", + "cachedResultName": "GPT-4O" + }, + "responses": { + "values": [ + { + "role": "=system", + "content": "=ROLLE: Du bist ein präziser Chefredakteur für Personal Branding. Deine Aufgabe ist das \"Surgical Editing\": Korrigiere einen LinkedIn-Entwurf NUR dort, wo er gegen die Identität des Absenders verstößt oder typische KI-Muster aufweist. Lasse den Rest des Textes unangetastet.\n\nDEIN REFERENZ-PROFIL (Der Maßstab):\n\nBranche: {{ $('Formatieren').item.json.res_industry }}\n\nPerspektive: {{ $('Formatieren').item.json.style_perspective }}\n\nAnsprache: {{ $('Formatieren').item.json.style_address }}\n\nEnergie-Level: {{ $('Formatieren').item.json.finger_energy }} (1=sachlich, 10=explosiv)\n\nSignature Phrases: {{ $('Formatieren').item.json.finger_phrases.join(', ') }}\n\nTonalität: {{ $('Formatieren').item.json.style_tonality }}\n\nVerbotene Wörter: {{ $('Formatieren').item.json.guard_forbidden_words.join(', ') }}\n\nDEINE CHIRURGISCHEN KORREKTUR-REGELN:\n\n1. Satzbau-Optimierung (STRENG):\n\nKeine Gedankenstriche: Ersetze Gedankenstriche (–), die zur Satzverbindung oder für Einschübe genutzt werden, durch Kommas oder Punkte. Diese wirken zu konstruiert.\n\nAnpassung: Tausche den Gedankenstrich nicht einfach gegen ein Komma aus. Wenn ein Gedankenstrich einen Einschub oder eine Ergänzung markiert, formuliere den Satz um. Erstelle daraus entweder zwei eigenständige, prägnante Sätze oder nutze Konjunktionen (weil, da, und), damit der Lesefluss natürlich bleibt.\n\n2. Ansprache-Check:\n\nStelle sicher, dass der Text konsequent die Form {{ $('Formatieren').item.json.style_address }} nutzt.\n\nFalls der Text \"Sie\" verwendet, das Profil aber \"Duzen\" verlangt (oder umgekehrt), korrigiere alle betroffenen Pronomen und Verbformen sofort.\n\nEnergie-Abgleich: Prüfe, ob die Intensität des Textes zum Energie-Level ({{ $('Formatieren').item.json.finger_energy }}) passt. Korrigiere, wenn der Text zu lahm oder unpassend überdreht wirkt.\n\nSignaturen: Achte darauf, dass Signature Phrases ({{ $('Formatieren').item.json.finger_phrases.join(', ') }}) natürlich klingen und nicht deplatziert wirken.\n\n3. Perspektiv-Check (Prio 1):\n\nWenn das Profil {{ $('Formatieren').item.json.style_perspective }} verlangt, wandle belehrende \"Sie/Euch\"-Sätze (\"Stellt euch vor\", \"Ihr solltet\") konsequent um.\n\nNutze Reflexionen (\"Ich sehe immer wieder...\", \"Ich frage mich oft...\") statt direkter Handlungsaufforderungen im Hauptteil.\n\nWenn du Fragen aus der \"Sie-Form\" korrigierst, wandle sie in Reflexionen oder direkte Fragen an die Community im passenden Stil (Du/Euch oder Sie) um.'\n\nWenn ein Datum im Text vorkommt achte darauf den Satz so anzupassen, \n\n4. 
Zeit- & Datums-Präzisierung:\n\nKein „heute“: Ersetze Wörter wie „heute“, „heutzutage“ oder „aktuell“ (sofern sie sich auf den Zeitpunkt der Nachricht beziehen) durch konkrete Datumsangaben (z.B. „am 16. Januar 2026“ oder „mit Stand vom 16. Januar“).\n\nStilistischer Fluss: Das Datum darf nicht wie ein Fremdkörper wirken. Passe den Satzbau so an, dass das Datum organisch eingebettet ist.\n\n5. Unicode-Reparatur:\n\nPrüfe den ersten Satz (Hook). Wenn die Unicode-Fettung Umlaute (ä, ö, ü, ß) zerstört hat, korrigiere das Wort sofort.\n\nWICHTIG: Entferne alle gesetzten Markdown-Sterne (**) rückstandslos. Wandle Texte mit \"**\" am Anfang und Ende des Satzes stattdessen in Fettschrift um.\n\n6. Wahrung der Struktur:\n\nNutze keine Trennlinien (---). Sorge für einfache, saubere Zeilenumbrüche ohne unnötige visuelle Spielereien.\n\nINPUT (Der Entwurf des Writers): > {{ $json.output[0].content[0].text }}\n\nDEIN OUTPUT: > Gib AUSSCHLIESSLICH den fertigen, korrigierten Post-Text zurück. Keine Kommentare, keine Einleitung, keine Erklärungen." + } + ] + }, + "builtInTools": {}, + "options": {} + }, + "type": "@n8n/n8n-nodes-langchain.openAi", + "typeVersion": 2.1, + "position": [ + 2064, + -336 + ], + "id": "35cdbcf9-9cfb-4bf4-bd5c-fd7b22059acb", + "name": "Kritiker", + "credentials": { + "openAiApi": { + "id": "GtjGEBTfdDaAfGDA", + "name": "OpenAi account" + } + } + }, + { + "parameters": { + "modelId": { + "__rl": true, + "value": "gpt-4o", + "mode": "list", + "cachedResultName": "GPT-4O" + }, + "responses": { + "values": [ + { + "role": "system", + "content": "=Rolle: Du bist ein Experte für linguistische Profiling-Analysen. Deine Aufgabe ist es, aus dem Input (Persona-Daten und Beispiel-Posts) ein vollumfängliches \"Content-Operating-System\" (COS) im JSON-Format zu erstellen. Dieses COS dient als einzige Datenquelle für nachfolgende spezialisierte KI-Agenten.\n\nAnalyse-Anweisungen:\n\nDeskriptive Tonalität: Beschreibe die Tonalität frei und präzise (z. B. \"nahbar-bayerisch\", \"visionär-empathisch\").\n\nStruktur-Extraktion: Analysiere, ob Storytelling, Daten oder Listen dominieren.\n\nResearcher-Vorgaben: Extrahiere Branche und Zielgruppe für gezielte Websuche.\n\nWriter-Vorgaben: Identifiziere die exakte \"Hook-Formel\" und die Längen-Dynamik.\n\nFingerprint-Analyse: Suche nach \"Linguistischen Fingerabdrücken\": Nutzt die Person Großbuchstaben zur Emphase (Shouting)? Nutzt sie spezifische Satzzeichen-Ketten (z.B. !!? oder :)!)? Wie ist das energetische Level des Textes (1-10)?\n\nCritic-Vorgaben: Erstelle eine Blacklist aus Buzzwords und bereits behandelten Themen.\n\n„WICHTIG: Orientiere dich zu 100 % an den bereitgestellten example_posts. Wenn die Person 'Ich' schreibt, darf die Perspective nicht 'Wir' sein. Wenn die Posts kurz sind, darf der word_count nicht 1200 sein. Analysiere die tatsächliche Datenlage, nicht deine Erwartung an die Branche.“\n\nSTRIKTE AUSGABE-REGELN:\n\nNUR REINER TEXT-OUTPUT: Gib kein json am Anfang und kein am Ende aus. Starte direkt mit der geschweiften Klammer {.\n\nKEIN DISKURS: Keine Einleitung, kein Outro.\n\nVOLLSTÄNDIGKEIT: Fülle jedes Feld.\n\nJSON-STRUKTUR (Das COS-Schema):\n\n{\n \"research_context\": {\n \"industry\": \"In welcher Branche bewegt sich die Person?\",\n \"target_audience_detailed\": \"Wer genau soll das lesen? (Schmerzpunkte, Interessen)\",\n \"content_pillars\": [\n \"3-5 Hauptthemen, über die die Person schreibt\"\n ],\n \"expertise_level\": \"Wie tief/technisch ist das Wissen? 
(Laie bis Experte)\"\n },\n \"writing_dna\": {\n \"tonality_description\": \"Freie Beschreibung der Stimme und Stimmung\",\n \"perspective\": \"Ich, Wir, oder Man-Perspektive?\",\n \"form_of_address\": \"Duzen (Du/Euch), Siezen (Sie/Ihnen) oder Neutral/Keine direkte Ansprache?\",\n \"sentence_dynamics\": \"Satzlänge, Rhythmus, Nutzung von rhetorischen Stilmitteln\",\n \"length_and_pacing\": {\n \"expected_post_length\": \"Kurz (Impuls), Mittel (Storytelling) oder Lang (Deep-Dive)?\",\n \"average_word_count\": \"Ungefähre Wortanzahl basierend auf den Beispielen\",\n \"paragraph_cadence\": \"Wie viele Sätze bilden einen Absatz? Wie ist der Lesefluss (schnell/langsam)?\"\n },\n \"hook_formula\": \"Genaue Analyse, wie der erste Satz aufgebaut ist (z.B. Provokation, Frage, Ergebnis)\",\n \"cta_style\": \"Wie werden die Leute zum Handeln aufgefordert?\",\n \"vocabulary_preferences\": [\n \"Bevorzugte Begriffe oder Phrasen, sei hier ausführlich\"\n ]\n },\n \"linguistic_fingerprint\": {\n \"energy_level\": \"Skala 1-10 (1 = trocken/sachlich, 10 = hochemotional/explosiv)\",\n \"rhetorical_shouting\": \"Werden Worte komplett großgeschrieben zur Betonung? Wenn ja, welche Beispiele (z.B. JETZT, WEG)?\",\n \"punctuation_style\": \"Besonderheiten bei Satzzeichen (z.B. exzessive Ausrufezeichen, :)!, ...)\",\n \"signature_phrases\": [\n \"Wiederkehrende Ausrufe oder Markenzeichen-Sätze (z.B. Halleluja, Galopp, Sorry to say)\"\n ],\n \"narrative_anchors\": [\n \"Wiederkehrende Story-Elemente (z.B. Dialoge, Zitate, Flashbacks, PS-Zeilen)\"\n ]\n },\n \"visual_fingerprint\": {\n \"unicode_rules\": \"Wird fette/kursive Unicode-Schrift genutzt? Wo genau?\",\n \"emoji_logic\": {\n \"specific_emojis\": [\n \"Vollständige Liste ALLER genutzten Emojis inkl. Aufzählungszeichen (👉) und Währungssymbole (💸)\"\n ],\n \"placement\": \"Anfang, Ende, als Aufzählung?\",\n \"frequency\": \"Wie viele Emojis pro 100 Wörter?\"\n },\n \"formatting_layout\": \"Absatzlänge, Nutzung von Trennlinien oder Listen\"\n },\n \"guardrails_for_critic\": {\n \"forbidden_buzzwords\": [\n \"Liste von unerwünschten Wörtern\"\n ],\n \"style_no_gos\": [\n \"Was würde den Stil ruinieren? (z.B. zu viel Eigenlob, zu passiv)\"\n ],\n \"topic_blacklist\": [\n \"Themen der bisherigen Posts, um Redundanz zu vermeiden\"\n ]\n }\n}" + }, + { + "content": "=Hier sind die Inputs für die Analyse:\n\nUnternehmensnname:{{ $json.company_name }}\nPersona:{{ $json.persona }}\nStyle-Guide:{{ $json.style_guide }}\nThemenhistorie:{{ $json.topic_history }}\nBeispielposts:{{ $json.example_posts }}\nAnsprache: {{ $json.form_of_address }}\n\n" + } + ] + }, + "builtInTools": {}, + "options": {} + }, + "type": "@n8n/n8n-nodes-langchain.openAi", + "typeVersion": 2.1, + "position": [ + 464, + -336 + ], + "id": "362e1504-3eed-4b97-b9f0-8a519926395c", + "name": "Profil-Analyzer", + "credentials": { + "openAiApi": { + "id": "GtjGEBTfdDaAfGDA", + "name": "OpenAi account" + } + } + }, + { + "parameters": { + "jsCode": "const input = $input.item.json;\n\n// 1. 
Helfer-Funktion: Findet den JSON-String irgendwo im Objekt\nfunction findRawJson(obj) {\n if (typeof obj === 'string' && obj.trim().startsWith('{')) return obj;\n if (typeof obj === 'object' && obj !== null) {\n if (obj.output_text && obj.output_text[0] && obj.output_text[0].text) return obj.output_text[0].text;\n if (obj.text && typeof obj.text === 'string') return obj.text;\n if (obj.output && typeof obj.output === 'string') return obj.output;\n for (let key in obj) {\n let found = findRawJson(obj[key]);\n if (found) return found;\n }\n }\n return null;\n}\n\n// 2. Helfer-Funktion für tiefes Suchen von Objekten\nfunction findDeep(obj, key) {\n if (obj && obj.hasOwnProperty(key)) return obj[key];\n for (let i in obj) {\n if (typeof obj[i] === 'object' && obj[i] !== null) {\n let found = findDeep(obj[i], key);\n if (found) return found;\n }\n }\n return null;\n}\n\nlet cos = {};\nconst jsonString = findRawJson(input);\nif (jsonString) {\n try {\n cos = JSON.parse(jsonString.replace(/```json|```/g, '').trim());\n } catch (e) { cos = {}; }\n}\n\n// Kategorien extrahieren\nconst res = cos.research_context || findDeep(cos, 'research_context') || {};\nconst dna = cos.writing_dna || findDeep(cos, 'writing_dna') || {};\nconst finger = cos.linguistic_fingerprint || findDeep(cos, 'linguistic_fingerprint') || {}; // <-- NEU\nconst vis = cos.visual_fingerprint || findDeep(cos, 'visual_fingerprint') || {};\nconst guard = cos.guardrails_for_critic || findDeep(cos, 'guardrails_for_critic') || {};\n\n// 3. Finales Mapping ALLER Felder\nreturn {\n // --- RESEARCH ---\n \"res_industry\": res.industry || \"Unternehmensberatung\",\n \"res_audience\": res.target_audience_detailed || \"Entscheider\",\n \"res_pillars\": res.content_pillars || [],\n \"res_expertise\": res.expertise_level || \"Experte\",\n\n // --- WRITING DNA ---\n \"style_tonality\": dna.tonality_description || \"Inspirierend\",\n \"style_perspective\": dna.perspective || \"Ich-Form\",\n \"style_address\": dna.form_of_address || \"Duzen (Du/Euch)\",\n \"style_dynamics\": dna.sentence_dynamics || \"Prägnant\",\n \"style_hook\": dna.hook_formula || \"Persönlicher Einstieg\",\n \"style_cta\": dna.cta_style || \"Interaktion\",\n \"style_word_count\": dna.length_and_pacing?.average_word_count || \"250\",\n \"style_paragraph_style\": dna.length_and_pacing?.paragraph_cadence || \"Kurze Absätze\",\n \"style_vocab\": dna.vocabulary_preferences || [],\n\n // --- LINGUISTIC FINGERPRINT (DIE NEUE KATEGORIE) ---\n \"finger_energy\": finger.energy_level || \"7\",\n \"finger_shouting\": finger.rhetorical_shouting || \"Dezent\",\n \"finger_punctuation\": finger.punctuation_style || \"Standard\",\n \"finger_phrases\": finger.signature_phrases || [],\n \"finger_anchors\": finger.narrative_anchors || [],\n\n // --- VISUALS ---\n \"vis_unicode\": vis.unicode_rules || \"Fett für Hooks\",\n \"vis_emojis\": vis.emoji_logic?.specific_emojis || [\"🚀\"],\n \"vis_emoji_placement\": vis.emoji_logic?.placement || \"Ende\",\n \"vis_emoji_density\": vis.emoji_logic?.frequency || \"Mittel\",\n \"vis_layout\": vis.formatting_layout || \"Mobil-optimiert\",\n\n // --- GUARDRAILS ---\n \"guard_forbidden_words\": guard.forbidden_buzzwords || [],\n \"guard_no_gos\": guard.style_no_gos || [],\n \"guard_blacklist\": guard.topic_blacklist || []\n};" + }, + "type": "n8n-nodes-base.code", + "typeVersion": 2, + "position": [ + 768, + -336 + ], + "id": "934900d0-8018-4572-8e64-68e28a8efecb", + "name": "Formatieren" + }, + { + "parameters": { + "modelId": { + "__rl": true, + "value": 
"gpt-4o", + "mode": "list", + "cachedResultName": "GPT-4O" + }, + "responses": { + "values": [ + { + "role": "system", + "content": "=Du bist ein erfahrener Content Strategist. Deine Aufgabe ist es, aus einer Liste von 3 Recherche-Ergebnissen das absolut beste Thema für einen LinkedIn-Post auszuwählen.\n\nDeine Entscheidungsgrundlage (Profil):\n\nZielgruppe: {{ $('Formatieren').item.json.res_audience }}\n\nThemen-Säulen: {{ $('Formatieren').item.json.res_pillars }}\n\nExpertise-Level: {{ $('Formatieren').item.json.res_expertise }}\n\nTonalität: {{ $('Formatieren').item.json.style_tonality }}\n\nBewertungskriterien:\n\nRelevanz: Wie stark betrifft das Thema die Schmerzpunkte von {{ $('Formatieren').item.json.res_audience }}\n\nNeuheit: Ist es ein frischer Fakt oder nur aufgewärmtes Wissen?\n\nStil-Fit: Erlaubt das Thema den spezifischen Schreibstil ({{ $('Formatieren').item.json.style_tonality }})?\n\nExpertise: Passt die Komplexität zu einem {{ $('Formatieren').item.json.res_expertise }}?\n\nDein Auftrag: Analysiere die 3 Vorschläge. Wähle den EINEN Gewinner aus, der heute die höchste Interaktion und den größten Nutzwert verspricht.\n\nÜbernimm im Feld selected_fact nicht nur eine Zusammenfassung, sondern alle konkreten Details, Daten und Beispiele aus dem gewählten Recherche-Punkt.\n\nAusgabe-Format (Strikt JSON): Gib NUR ein JSON-Objekt zurück: { \"selected_title\": \"Titel des Gewinners\", \"selected_fact\": \"Der Kern-Fakt des Gewinner-Themas\", \"reasoning\": \"Kurze Begründung, warum dieses Thema gewählt wurde (1 Satz)\" }" + }, + { + "role": "=user", + "content": "=Hier sind die 3 Recherche-Ergebnisse: {{ $json.choices[0].message.content }}" + } + ] + }, + "builtInTools": {}, + "options": {} + }, + "type": "@n8n/n8n-nodes-langchain.openAi", + "typeVersion": 2.1, + "position": [ + 1200, + -336 + ], + "id": "fa5b45d3-be96-40f3-b2ac-6a523aee2768", + "name": "Wählt-Thema", + "credentials": { + "openAiApi": { + "id": "GtjGEBTfdDaAfGDA", + "name": "OpenAi account" + } + } + }, + { + "parameters": { + "jsCode": "const input = $input.item.json;\n\n// 1. Helfer-Funktion: Findet den JSON-String im Selector-Output\nfunction findRawJson(obj) {\n if (typeof obj === 'string' && obj.trim().startsWith('{')) return obj;\n if (typeof obj === 'object' && obj !== null) {\n if (obj.output_text && obj.output_text[0] && obj.output_text[0].text) return obj.output_text[0].text;\n if (obj.text && typeof obj.text === 'string') return obj.text;\n if (obj.output && typeof obj.output === 'string') return obj.output;\n for (let key in obj) {\n let found = findRawJson(obj[key]);\n if (found) return found;\n }\n }\n return null;\n}\n\nlet selection = {};\nconst jsonString = findRawJson(input);\n\nif (jsonString) {\n try {\n selection = JSON.parse(jsonString.replace(/```json|```/g, '').trim());\n } catch (e) {\n throw new Error(\"Selector-JSON konnte nicht geparst werden.\");\n }\n} else {\n throw new Error(\"Kein gültiger Output vom Selector gefunden.\");\n}\n\n// 2. 
Mapping auf flache Variablen für den Writer\nreturn {\n \"sel_title\": selection.selected_title || \"Kein Titel gewählt\",\n \"sel_fact\": selection.selected_fact || \"Kein Fakt gefunden\",\n \"sel_reasoning\": selection.reasoning || \"Keine Begründung angegeben\",\n \"sel_link\": selection.selected_link || \"\" // Falls du die Quelle doch mal wieder mitnimmst\n};" + }, + "type": "n8n-nodes-base.code", + "typeVersion": 2, + "position": [ + 1552, + -336 + ], + "id": "6f819d9f-5dae-4e58-a33d-adeaee349e94", + "name": "Formatiert-auch" + }, + { + "parameters": { + "modelId": { + "__rl": true, + "value": "gpt-4o", + "mode": "list", + "cachedResultName": "GPT-4O" + }, + "responses": { + "values": [ + { + "role": "system", + "content": "=ROLLE: Du bist ein erstklassiger Ghostwriter für LinkedIn. Deine Aufgabe ist es, aus einem Recherche-Fakt einen Post zu schreiben, der exakt so klingt wie der digitale Zwilling der beschriebenen Person. Du passt dich zu 100 % an das bereitgestellte Profil an.\n\n\n1. DER INHALT:\n\n\nThema: {{ $('Formatiert-auch').item.json.sel_title }}\n\nKern-Fakt: {{ $('Formatiert-auch').item.json.sel_fact }}\n\nKontext: {{ $('Formatiert-auch').item.json.sel_reasoning }}\n\n2. DER STIL & ENERGIE:\n\nEnergie-Level (1-10): {{ $('Formatieren').item.json.finger_energy }}. (WICHTIG: Passe die Intensität und Leidenschaft des Textes exakt an diesen Wert an.)\n\nRhetorisches Shouting: {{ $('Formatieren').item.json.finger_shouting }}. (Nutze GROSSBUCHSTABEN für einzelne Wörter genau so, wie hier beschrieben, um maximale Emphase zu erzeugen, mach das für KEINE anderen Wörter!)\n\nTonalität: {{ $('Formatieren').item.json.style_tonality }}\n\nAnsprache (STRENGSTENS EINHALTEN): {{ $('Formatieren').item.json.style_address }}.\n\nPerspektive (STRENGSTENS EINHALTEN): {{ $('Formatieren').item.json.style_perspective }}.\n\nSatz-Dynamik & Interpunktion: {{ $('Formatieren').item.json.style_dynamics }}. {{ $('Formatieren').item.json.style_dynamics }}. Nutze zusätzlich diesen spezifischen Zeichen-Stil: {{ $('Formatieren').item.json.finger_punctuation }}.\n\nBranche: {{ $('Formatieren').item.json.res_industry }}\n\nZielgruppe: {{ $('Formatieren').item.json.res_audience }}\n\nSatz-Dynamik: {{ $('Formatieren').item.json.style_dynamics }}\n\nWortschatz-Präferenzen: {{ $('Formatieren').item.json.style_vocab.join(', ') }}\n\n3. LINGUISTISCHER FINGERABDRUCK:\n\nSignature Phrases: Nutze organisch (nicht erzwungen) Begriffe oder Ausrufe wie: {{ $('Formatieren').item.json.finger_phrases.join(', ') }}.\n\nErzähl-Anker: Baue Elemente ein wie: {{ $('Formatieren').item.json.finger_anchors.join(', ') }}. (Falls 'PS-Zeilen' oder 'Dialoge' genannt sind, integriere diese zwingend.)\n\nWortschatz-Präferenzen: {{ $('Formatieren').item.json.style_vocab.join(', ') }}.\n\n\n4. STRUKTUR:\n\nDer Hook: Nutze zwingend diese Formel: {{ $('Formatieren').item.json.style_hook }}.\n\nLayout: {{ $('Formatieren').item.json.vis_layout }}.\n\nLänge: Ca. {{ $('Formatieren').item.json.style_word_count }} Wörter.\n\nCTA: Beende den Post mit: {{ $('Formatieren').item.json.style_cta }}.\n\n5. VISUELLE REGELN:\n\nUnicode-Fettung: Nutze für den ersten Satz (Hook) fette Unicode-Zeichen (z.B. 𝐖𝐢𝐜𝐡𝐭𝐢𝐠𝐞𝐫 𝐒𝐚𝐭𝐳), wie in {{ $('Formatieren').item.json.vis_unicode }} definiert.\n\nEmoji-Logik: Verwende {{ $('Formatieren').item.json.vis_emojis.join(' ') }}. Platzierung: {{ $('Formatieren').item.json.vis_emoji_placement }}. Dichte: {{ $('Formatieren').item.json.vis_emoji_density }}.\n\n\n6. 
GUARDRAILS:\n\nVerbotene Wörter: {{ $('Formatieren').item.json.guard_forbidden_words.join(', ') }}\n\nNo-Gos: {{ $('Formatieren').item.json.guard_no_gos.join(', ') }}\n\nEinschränkung: Schreibe niemals über Themen aus dieser Blacklist: {{ $('Formatieren').item.json.guard_blacklist.join(', ') }}.\n\nDEIN AUFTRAG: Schreibe den Post so, dass er für die Zielgruppe {{ $('Formatieren').item.json.res_audience }} einen klaren Mehrwert bietet. Sei authentisch für die Branche {{ $('Formatieren').item.json.res_industry }}. Mach die Persönlichkeit des Liguistischen Fingerabdrucks spürbar. Beginne direkt mit dem Hook. Keine einleitenden Sätze, kein \"Hier ist der Post\"." + } + ] + }, + "builtInTools": {}, + "options": {} + }, + "type": "@n8n/n8n-nodes-langchain.openAi", + "typeVersion": 2.1, + "position": [ + 1744, + -336 + ], + "id": "e2f19cdc-35cf-4be6-a964-8a56a04c1b1b", + "name": "Writer", + "credentials": { + "openAiApi": { + "id": "GtjGEBTfdDaAfGDA", + "name": "OpenAi account" + } + } + }, + { + "parameters": { + "sendTo": "timo.uttenweiler@onyva.de", + "subject": "=LinkedIn-Post zum Thema: \"{{ $('Formatiert-auch').item.json.sel_title }}\"", + "message": "=
Dein neuer LinkedIn-Post ist bereit!\n\nHier ist der Entwurf vom KI-Agenten:\n\n{{ $('Kritiker').item.json.output[0].content[0].text }}\n\nMöchtest du diesen Post genehmigen?
\n\n\n ✅ Genehmigen & Veröffentlichen\n\n\n\n ❌ Ablehnen\n", + "options": {} + }, + "type": "n8n-nodes-base.gmail", + "typeVersion": 2.2, + "position": [ + 2608, + -336 + ], + "id": "894f32ba-d036-425e-b519-04aefb44c86b", + "name": "Send a message1", + "webhookId": "99444e24-ce9f-455d-8e0f-eb5720ca9e01", + "credentials": { + "gmailOAuth2": { + "id": "G1YShai9sjjVAoce", + "name": "Gmail account" + } + } + }, + { + "parameters": { + "operation": "append", + "documentId": { + "__rl": true, + "value": "1w1vw0mKqlRoxuCc3leHInHnpcwDJr6USg5MNXGpKuY8", + "mode": "list", + "cachedResultName": "LinkedIn", + "cachedResultUrl": "https://docs.google.com/spreadsheets/d/1w1vw0mKqlRoxuCc3leHInHnpcwDJr6USg5MNXGpKuY8/edit?usp=drivesdk" + }, + "sheetName": { + "__rl": true, + "value": "gid=0", + "mode": "list", + "cachedResultName": "Approval", + "cachedResultUrl": "https://docs.google.com/spreadsheets/d/1w1vw0mKqlRoxuCc3leHInHnpcwDJr6USg5MNXGpKuY8/edit#gid=0" + }, + "columns": { + "mappingMode": "defineBelow", + "value": { + "Thema": "={{ $('Formatiert-auch').item.json.sel_title }}", + "Inhalt": "={{ $json.output[0].content[0].text }}", + "ID": "={{ $executionId }}" + }, + "matchingColumns": [], + "schema": [ + { + "id": "Thema", + "displayName": "Thema", + "required": false, + "defaultMatch": false, + "display": true, + "type": "string", + "canBeUsedToMatch": true + }, + { + "id": "Inhalt", + "displayName": "Inhalt", + "required": false, + "defaultMatch": false, + "display": true, + "type": "string", + "canBeUsedToMatch": true, + "removed": false + }, + { + "id": "Approved", + "displayName": "Approved", + "required": false, + "defaultMatch": false, + "display": true, + "type": "string", + "canBeUsedToMatch": true + }, + { + "id": "ID", + "displayName": "ID", + "required": false, + "defaultMatch": false, + "display": true, + "type": "string", + "canBeUsedToMatch": true, + "removed": false + } + ], + "attemptToConvertTypes": false, + "convertFieldsToString": false + }, + "options": {} + }, + "type": "n8n-nodes-base.googleSheets", + "typeVersion": 4.7, + "position": [ + 2384, + -336 + ], + "id": "d057d1e1-4daa-4a2f-a4f1-85489566716e", + "name": "Append row in sheet", + "credentials": { + "googleSheetsOAuth2Api": { + "id": "JaDglRLLYyCw24XV", + "name": "Google Sheets account" + } + } + }, + { + "parameters": { + "jsCode": "return {\n \"company_name\": \"MAKE IT MATTER\",\n \"persona\": \"Christina Hildebrandt, Co-Founder MAKE IT MATTER I Kommunikationsberatung, Interimsmanagement, PR & LinkedIn-Kompetenz für KMUs und Agenturen, die wachsen wollen. 25 Jahre Erfahrung in Konzern, Mittelstand und Agenturen.\",\n \n // Die Ansprache-Logik für den Critic-Node\n \"form_of_address\": \"Duzen (Du/Euch) - direkt, leidenschaftlich und hochemotional\",\n \n \"style_guide\": \"Kommunikation als Sales-Infrastruktur. Fokus auf ROI ('Alles was nicht einzahlt, muss weg'). Stil: Laut, ehrlich, begeisterungsfähig ('Halleluja!', 'Galopp!'). Nutzt Storytelling durch Flashbacks und persönliche Anekdoten, um strategische Punkte zu belegen.\",\n \n \"topic_history\": [\n \"KI-Suche als Sales-Infrastruktur\",\n \"Kommunikation treibt Sales über KI\",\n \"Positionierung für den Mittelstand (leise/komplexe Unternehmen)\",\n \"LinkedIn als Teil der klassischen Unternehmenskommunikation\",\n \"Die 'Zweite Pubertät' 2026: Neuanfang und Mut\"\n ],\n\n \"example_posts\": [\n `𝗞𝗜-𝗦𝘂𝗰𝗵𝗲 𝗶𝘀𝘁 𝗱𝗲𝗿 𝗲𝗿𝘀𝘁𝗲 𝗦𝗰𝗵𝗿𝗶𝘁𝘁 𝗶𝗺 𝗦𝗮𝗹𝗲𝘀 𝗙𝘂𝗻𝗻𝗲𝗹. Gute Kommunikation ist Sales-Infrastruktur. Das ist unsere Chance Christina!!! 
❞\n\nDas sagte Max kurz vor Weihnachten in einem Café zu mir und ich schrie ihn fast an: \n\"Max!! Sag das nochmal!!! Das ist genial!!! Das ist unser nächster Post!\"\n\nUnd…\nDA IST ER:)!🚀\n\nWarum ich so begeistert war?\nIch glaube, dass Kommunikation unbedingt auch Sales „driven“ muss. \nMake It Matter ist entstanden, weil es nur wenige Agenturen gibt, die so radikal+konsequent Kommunikation als Sales- und Leads-Wachstumstreiber mitdenken wie wir.\nALLES was nicht auf die Unternehnensziele einzahlt muss radikal WEG.\nLisa und ich haben EIN Ziel: Die New Business- bzw. Vertriebsmannschaft soll uns lieben:)\n\nKI spielt uns in die Karten und ist wie ein Brennglas:\nDie KI entscheidet, wer und überhaupt noch empfohlen wird und wer auf Shortlists landet.\n\nJETZT muss man kommunikative KI-Signale setzen und das „Dach decken“. JETZT kann man KI-Pflöcke im Netz einziehen, damit man im Sales-Entscheidungsprozess VORNE sichtbar wird.\n𝗞𝗼𝗺𝗺𝘂𝗻𝗶𝗸𝗮𝘁𝗶𝗼𝗻 𝘁𝗿𝗲𝗶𝗯𝘁 𝗦𝗮𝗹𝗲𝘀 ü𝗯𝗲𝗿 𝗞𝗜. Jetzt und künftig noch mehr!\n\nIm Gespräch mit Max wurde das sehr klar: KI recherchiert nicht mehr wie früher, sie priorisiert und bewertet, lange bevor der Vertrieb überhaupt spricht. \nEs gilt:\n👉Wenn KI ein Unternehmen nicht eindeutig einordnen kann, ist es für Sales nicht im Rennen.\n👉Genau hier wird Kommunikation zur Sales-Infrastruktur.\n\nMake It Matter Matter schafft dafür die strategische Grundlage aus Positionierung, Themenarchitektur und Public Relations als echte Third-Party-Validierung. \n(Klingt gut oder?! Und ist wahr!!)\n\nUnsre Stärken?\n👉LISA HIPP Hipp übersetzt diese Klarheit auf LinkedIn in wiederholbare Narrative, die Einordnung erzeugen. \n👉Max Anzile sorgt dafür, dass diese Signale gezielt distribuiert, getestet und messbar gemacht werden.\n👉 Und ich bin Kommunikation und Public Relations durch und durch (Konzern, Mittelstand, Agenturen. 25 Jahre\n\nSo entsteht für undere kein Content-Feuerwerk, sondern ein SYSTEM, das SALES-VORAUSWAHL gewinnt.\n💸💸💸\n\nWir sehen in der Praxis:\nDeals mit mehreren Kommunikationskontakten \n👉schliessen schneller und stabile\n👉und brauchen weniger Rabatt \nweil Vertrauen steigt und Vergleichbarkeit sinkt.\n\nUnsere Überzeugung ist klar: \nEntweder Kommunikation ist messbarer Teil des Sales Funnels oder sie wird 2026 gestrichen.\nSorry to say!!\nGood for us!!!!🙃\n\nChristina\n#Kommunikation #SalesSupport #SocialMedia #KISEO`,\n\n `Ein unstrategischer Flashback-Post aus dem Flixbus Richtung Bodensee.\n#AboutEducatedAwareness #AboutMittelstand #AusDerHüfteGeschossen\nSorry Lisa🙈\n⸻\nGestern habe ich brav den Keller aufgeräumt und bin an einer Kiste alter Job-Fotos hängengeblieben.\n📸\nIch in Amsterdam mit Tommy Hilfiger. Beim Abendessen mit Lovely Annette Weber, Flohmärkte mit Chefredakteuren, Bilder mit Steffi Graf, Sönke Wortmann, Natalia Wörner und Heike Makatsch in Nördlingen bei Strenesse. Cartier-Juste-Un-Clou-Launch in New York.\nHalleluja war ich wichtig, dünn, lustig und jung :)\nWas für eine goldene, sorglose Zeit*.\n💫💫💫\nBei diesem Foto👇 musste ich so lachen. Ich weiß noch, wie es entstanden ist:\nWir waren Sponsor beim Bambi. (Wir – damit meine ich Cartier damals.)\nMein Chef Tom Meggle gab mir damals die Erlaubnis, meine Freundinnen Bine Käfer (jetzt Lanz), Celia von Bismarck und Gioia von Thun mitzunehmen.\nWichtig wichtig.\nWir also aufgedresst wie Bolle, kommen an den roten Teppich, Blitzgewitter, 4 Girlies. Dann ein Rufen aus der Fotografenmenge:\n„Christina!!! Kannst Du aus dem Bild gehen??? 
Wir brauchen die 3 Mädels alleine!!!“ (weil echt wichtig).\nIch weiß noch, wie ich fast zusammengebrochen bin vor Lachen. „Hey!! ICH habe DIE mitgenommen!!“ 🤣🤣🤣\nEin Bild von uns 4 hab ich dann aber doch noch bekommen. Plus einen wundervollen Abend.\n….\nWas das mit Make It Matter äund „Job“ zu tun hat?\nWenig. Aber ein bisschen schon:\nEs zeigt aber, wie sehr ich mich verändert habe:\n💫𝗛𝗲𝘂𝘁𝗲 𝗮𝗿𝗯𝗲𝗶𝘁𝗲 𝗶𝗰𝗵 𝗮𝗺 𝗹𝗶𝗲𝗯𝘀𝘁𝗲𝗻 𝗳𝘂̈𝗿 𝗨𝗻𝘁𝗲𝗿𝗻𝗲𝗵𝗺𝗲𝗻, 𝗱𝗶𝗲 𝗴𝗿𝗼ß𝗮𝗿𝘁𝗶𝗴 𝘀𝗶𝗻𝗱, 𝗮𝗯𝗲𝗿 𝘇𝘂 𝗹𝗲𝗶𝘀𝗲, 𝘇𝘂 𝗸𝗼𝗺𝗽𝗹𝗲𝘅 𝗼𝗱𝗲𝗿 𝘇𝘂 𝗯𝗲𝘀𝗰𝗵𝗲𝗶𝗱𝗲𝗻, 𝘂𝗺 𝘀𝗲𝗹𝗯𝘀𝘁 𝘂̈𝗯𝗲𝗿 𝗶𝗵𝗿𝗲 𝗦𝘁𝗮̈𝗿𝗸𝗲 𝘇𝘂 𝘀𝗽𝗿𝗲𝗰𝗵𝗲𝗻\n(Es gibt so so so tolle Firmen, von denen noch keiner etwas gehört hat, meistens mit unglaublich netten Teams!)\n💫 Heute habe ich mich in erklärungsbedürftige, komplizierte Produkte, EducatedAwareness und #LinkedIn als Teil der klassischen #Unternehmenskommunikation verliebt.\n💫 Heute liebe ich komplexe und „unsexy“ Aufgaben, die ich knacken will.\nSo verändert man sich. Ist das nicht verrückt?\nSchön war’s trotzdem damals. Mensch, bin ich dankbar!\nEuch einen schönen Sonntag. Eure Christina\nwww.make-it-matter.de\n#MakeItMatter #Kommunikation #Wachstumstreiber #Mittelstand #PublicRelations\nPS. Ich setzt mich jetzt gleich an das Papier liebe Lisa. Bis 11.15h gab ich fertig😉`,\n\n `𝗪𝗘𝗥 𝗠𝗔𝗖𝗛𝗧 𝗠𝗜𝗧??? Ich habe entschieden, dass 2026 meine zweite Pubertät beginnt #MakeItMatter🚀\nHalleluja! Endlich hat das neue Jahr begonnen.\nMit meinen Freunden habe ich gestern beschlossen, dass ich das kommende Jahr jetzt mal völlig neu angehen werde:\nIch stelle mir einfach vor, ich wäre in meiner zweiten Pubertät!!!\nOk, ok, dieses Mal mit besserem Wein, einem kleinen Kontopuffer, 25 Jahren Berufserfahrung, besserem WLAN, aber mit dem gleichen Gefühl von damals:\n𝗗𝗘𝗥 𝗡𝗔̈𝗖𝗛𝗦𝗧𝗘 𝗟𝗘𝗕𝗘𝗡𝗦𝗔𝗕𝗦𝗖𝗛𝗡𝗜𝗧𝗧 𝗪𝗜𝗥𝗗 𝗗𝗘𝗥 𝗕𝗘𝗦𝗧𝗘!!!\nIch fühle es! Alles liegt vor mir und ich kann Pippi-Langstrumpf-mäßig einfach alles erreichen, à la:\n„𝘐𝘤𝘩 𝘩𝘢𝘣 𝘥𝘢𝘴 𝘯𝘰𝘤𝘩 𝘯𝘪𝘦 𝘨𝘦𝘮𝘢𝘤𝘩𝘵 – 𝘥𝘢𝘴 𝘬𝘢𝘯𝘯 𝘪𝘤𝘩 𝘣𝘦𝘴𝘵𝘪𝗺𝘮𝘵.“\n(PS: Wann haben wir das eigentlich verlernt?)\nIch finde die Ähnlichkeit zu meinem (nicht mehr pubertierenden) Sohn Lenny wirklich erstaunlich:\nEr ist genauso begeistert von der Idee, im Ausland zu studieren und sich etwas Eigenes, Großes aufzubauen, wie ich besessen von Lisas und meiner Make-It-Matter-Idee bin.\nJetzt sitzen wir auf dem Driverseat unseres Lebens und können Kommunikations-Burgen aufbauen: das, was wir am allerbesten können!\nWir mussten gestern bei dem Erste-und-Zweite-Pubertäts-Vergleich wirklich lachen:\nAuch die Hormonprobleme sind ähnlich. Nur habe ich meine im Griff bzw. hinter mir. Lenny hat noch eine (aufregende) Reise vor sich.\nMuss man da nicht automatisch grinsen?\nDieses Grinsen werde ich mir für 2026 vornehmen. Ich werde mich öfter an den Sternenzauber meiner ersten Pubertät erinnern, an die unaufhaltsame Kraft, die Fröhlichkeit, den Mut, die Neugierde und die Ausdauer.\nUnd wisst ihr, worauf ich mich am meisten freue?\nAuf die Momente, in denen ich bei den Social Media Pirates - we are hiring – we are hiring – ins Büro komme und Max Anzile (der übrigens 20 Jahre jünger ist als ich) zu mir sagt:\n„Guten Morgen, Boomer!“ …und frech grinst. Auf seine KI-Sessions, seine Ideen, neue Welten, Hummeln im Popo.\nUnd natürlich LISA HIPP, die allerallerallerbeste Geschäftspartnerin, die ich mir vorstellen kann. #ZamReinZamRaus\nSeid ihr dabei? 💫 Zweite Pubertät ab 2026? 💪 Vollgas? ✔️ Lebensfreude? 🫶 Kommunikation neu denken? ✔️ Jahr des Feuerpferdes? 
🐎\nDann GALOPP!!!!!\nIch wünsche euch, dass all eure Träume und Wünsche in Erfüllung gehen und dass ihr den Mut habt, etwas dafür zu tun!\n„Wenn die Sehnsucht größer ist als die Angst, wird Mut, Erfolg und Lebensfreude geboren.“\nIst das nicht schön?\nHappy New Year und happy neue Lebensphase(n)\nwünscht Euch Christina\n#MakeItMatter #Kommunikation #PublicRelations #LinkedInComms`\n ]\n};" + }, + "type": "n8n-nodes-base.code", + "typeVersion": 2, + "position": [ + 304, + -336 + ], + "id": "007e6b5d-d136-437b-baee-d74736b2c103", + "name": "Christina" + } + ], + "connections": { + "When clicking ‘Execute workflow’": { + "main": [ + [ + { + "node": "Christina", + "type": "main", + "index": 0 + } + ] + ] + }, + "Themen-Recherche": { + "main": [ + [ + { + "node": "Wählt-Thema", + "type": "main", + "index": 0 + } + ] + ] + }, + "Kritiker": { + "main": [ + [ + { + "node": "Append row in sheet", + "type": "main", + "index": 0 + } + ] + ] + }, + "Profil-Analyzer": { + "main": [ + [ + { + "node": "Formatieren", + "type": "main", + "index": 0 + } + ] + ] + }, + "Formatieren": { + "main": [ + [ + { + "node": "Themen-Recherche", + "type": "main", + "index": 0 + } + ] + ] + }, + "Wählt-Thema": { + "main": [ + [ + { + "node": "Formatiert-auch", + "type": "main", + "index": 0 + } + ] + ] + }, + "Formatiert-auch": { + "main": [ + [ + { + "node": "Writer", + "type": "main", + "index": 0 + } + ] + ] + }, + "Writer": { + "main": [ + [ + { + "node": "Kritiker", + "type": "main", + "index": 0 + } + ] + ] + }, + "Send a message1": { + "main": [ + [] + ] + }, + "Append row in sheet": { + "main": [ + [ + { + "node": "Send a message1", + "type": "main", + "index": 0 + } + ] + ] + }, + "Christina": { + "main": [ + [ + { + "node": "Profil-Analyzer", + "type": "main", + "index": 0 + } + ] + ] + } + }, + "pinData": { + "Themen-Recherche": [ + { + "id": "1b193197-4c0e-4714-824e-55581c6aae2d", + "model": "sonar", + "created": 1768833929, + "usage": { + "prompt_tokens": 423, + "completion_tokens": 1512, + "total_tokens": 1935, + "search_context_size": "low", + "cost": { + "input_tokens_cost": 0, + "output_tokens_cost": 0.002, + "request_cost": 0.005, + "total_cost": 0.007 + } + }, + "citations": [ + "https://montagsbuero.de/marketing-trends-2026-mittelstand/", + "https://blog.fette-beute.com/ki-trends-2026-wie-ki-die-kommunikation-ver%C3%A4ndert", + "https://ap-verlag.de/trendbarometer-industriekommunikation-2026-ki-wird-zum-standard-chancen-aber-teilweise-noch-ungenutzt/101840/", + "https://www.contentmanager.de/experte/marketing-trends-2026-das-sagen-die-expertinnen/", + "https://www.presseportal.de/pm/109860/6195152", + "https://www.fokus-ki.de/ki-strategie/ki-strategie-2026-warum-abwarten-fuer-kmus-teuer-wird-fokus-ki/", + "https://itwelt.at/news/kmu-wollen-trotz-krise-staerker-in-digitalisierung-investieren/", + "https://staffbase.com/de/blog/interne-kommunikation-trends", + "https://www.antenneac.de/artikel/das-sind-die-arbeitsmarkt-trends-2026-2543083" + ], + "search_results": [ + { + "title": "Marketing-Trends 2026: Was auf Unternehmen zukommt", + "url": "https://montagsbuero.de/marketing-trends-2026-mittelstand/", + "date": "2025-12-22", + "last_updated": "2025-12-22", + "snippet": "Marketing-Trends 2026: Was auf KMU und Marketingverantwortliche zukommt · Warum 2026 ein Schlüsseljahr für digitales Marketing wird · Generative ...", + "source": "web" + }, + { + "title": "KI-Trends 2026: Wie KI die Kommunikation verändert", + "url": 
"https://blog.fette-beute.com/ki-trends-2026-wie-ki-die-kommunikation-ver%C3%A4ndert", + "date": "2025-12-19", + "last_updated": "2025-12-20", + "snippet": "Künstliche Intelligenz eröffnet in der Kommunikation 2026 Chancen für KMU. Die relevanten Trends im Überblick.", + "source": "web" + }, + { + "title": "Trendbarometer Industriekommunikation 2026: KI wird zum ...", + "url": "https://ap-verlag.de/trendbarometer-industriekommunikation-2026-ki-wird-zum-standard-chancen-aber-teilweise-noch-ungenutzt/101840/", + "date": "2026-01-14", + "last_updated": "2026-01-18", + "snippet": "Die wichtigsten Trends im Online Marketing: Warum es 2026 auf Datenqualität, Automatisierung und KI ankommt. 11. Januar 2026. Künstliche ...", + "source": "web" + }, + { + "title": "Marketing Trends 2026: Das sagen die Expert:innen", + "url": "https://www.contentmanager.de/experte/marketing-trends-2026-das-sagen-die-expertinnen/", + "date": "2025-12-30", + "last_updated": "2026-01-02", + "snippet": "Für 2026 sehen wir im digitalen Marketing vor allem drei Trends: die verstärkte Nutzung von KI für personalisierte Kampagnen, die ...", + "source": "web" + }, + { + "title": "Trendbarometer Industriekommunikation 2026: KI wird zum ...", + "url": "https://www.presseportal.de/pm/109860/6195152", + "date": "2026-01-13", + "last_updated": "2026-01-19", + "snippet": "Trendbarometer Industriekommunikation 2026: KI wird zum Standard - Chancen aber teilweise noch ungenutzt · Diskrepanz zwischen Anspruch und ...", + "source": "web" + }, + { + "title": "KI-Strategie 2026: Warum Abwarten für KMUs teuer wird", + "url": "https://www.fokus-ki.de/ki-strategie/ki-strategie-2026-warum-abwarten-fuer-kmus-teuer-wird-fokus-ki/", + "date": "2026-01-06", + "last_updated": "2026-01-09", + "snippet": "Dieser Artikel gibt Ihnen als Entscheider im KMU-Umfeld einen strategischen Überblick, zeigt reale Risiken des Abwartens auf und liefert eine ...", + "source": "web" + }, + { + "title": "KMU wollen trotz Krise stärker in Digitalisierung investieren", + "url": "https://itwelt.at/news/kmu-wollen-trotz-krise-staerker-in-digitalisierung-investieren/", + "date": "2023-01-31", + "last_updated": "2026-01-08", + "snippet": "Eine neue Studie zeigt: 85 Prozent der KMU finden Digitalisierungsmaßnahmen wichtig für den Unternehmenserfolg. Jedes zweite Unternehmen will mehr ...", + "source": "web" + }, + { + "title": "Die 5 wichtigsten Trends für die interne Kommunikation 2026", + "url": "https://staffbase.com/de/blog/interne-kommunikation-trends", + "date": "2025-09-16", + "last_updated": "2025-10-26", + "snippet": "Alljährlicher Artikel mit den Trends in der internen Kommunikation. 2026 werden wir erleben, wie Künstliche Intelligenz (KI) die Employee ...", + "source": "web" + }, + { + "title": "Das sind die Arbeitsmarkt-Trends 2026", + "url": "https://www.antenneac.de/artikel/das-sind-die-arbeitsmarkt-trends-2026-2543083", + "date": "2026-01-11", + "last_updated": "2026-01-12", + "snippet": "1. Jobsicherheit bleibt wichtig · 2. Flexibilität stagniert - Arbeitgeber sitzen am längeren Hebel · 3. KI ist wichtig, aber kann den Menschen ...", + "source": "web" + } + ], + "object": "chat.completion", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "# LinkedIn Content-Lücke: 3 Aufhänger für Kommunikationsberatung & KMU-Marketing 2026\n\n---\n\n## 1. 
News-Flash: KI-Agenten werden zur Standard-Ressource im B2B-Marketing\n\n**[TITEL]:** \"59% der PR-Profis setzen KI ein – aber nur 20% nutzen das volle Potenzial\"\n\n**[DER FAKT:]**\n\nEine aktuelle Studie des PRLab (2025) zeigt: **59 Prozent der PR-Fachleute betrachten KI als Top-Priorität – vor allem für Entwürfe und Trendanalysen.[2]** Parallel dazu hat der Bundesverband Industrie Kommunikation (bvik) in seiner neuen Trendbarometer-Studie 2026 identifiziert, dass **69% der befragten Unternehmen davon ausgehen, dass „künstliche Mitarbeiter\" (KI-Assistenten) das B2B-Marketing in den nächsten zwei Jahren standardmäßig als zusätzliche Ressource unterstützen werden.[3][5]** \n\nDie Realität: Während fast 60% der PR-Profis KI bereits einsetzen, mangelt es vielen Unternehmen noch an klaren Governance-Regeln und strategischen KI-Konzepten.[2][3] KI-Agenten werden nicht als isolierte Tools genutzt, sondern entwickeln sich zu **vernetzten Kommunikationsökosystemen, in denen Brand-, Compliance-, QA- und Product-Roadmap-Agenten zusammenarbeiten.[2]**\n\n**[WARUM RELEVANT]:**\n\nFür KMUs und Agenturen ist dies der kritische Moment: KI ist nicht mehr die Zukunft – sie ist **Standard geworden**. Unternehmen, die jetzt nicht aktiv eine KI-Strategie entwickeln, verlieren Wettbewerbsfähigkeit gegenüber Konkurrenten, die bereits 38% mehr qualifizierte Anfragen mit KI-gestützter Optimierung generieren.[6] \n\nDer massive Nutzwert liegt darin, dass Agenturen und KMUs ihren Clients zeigen können: **KI ist kein Nice-to-have mehr – es ist ein Wettbewerbsfaktor.** Gleichzeitig gibt es noch eine große Lücke zwischen Adoption und strategischer Umsetzung, die Beratungsunternehmen füllen können. Dies ist der ideale Moment für Content, der Unternehmen hilft, von reaktiver KI-Nutzung zu proaktiver KI-Strategie zu wechseln.\n\n---\n\n## 2. Schmerzpunkt-Löser: E-Mail-Marketing bleibt Top-ROI-Kanal – aber nur mit intelligenter Segmentierung\n\n**[TITEL]:** \"E-Mail-Marketing: Der unterschätzte Retention-Engine für KMU-Wachstum\"\n\n**[DER FAKT:]**\n\nDas Marketing-Paradoxon 2026: Während KMUs massiv in KI und neue Kanäle investieren, bleibt **E-Mail-Marketing laut Gartner die am weitesten verbreitete direkte Marketingtechnologie – und 41% der CMOs in großen Unternehmen planen Investitionserhöhungen in diesem Bereich.[3]** \n\nGleichzeitig verschiebt sich der strategische Fokus deutlich: **Kundenbindung wird zum Wachstumsermöglicher, nicht nur die Kundenakquise.[4]** Die Kombination aus KI-gestützter Personalisierung und E-Mail-Automatisierung ermöglicht es KMUs, **Nutzer in Echtzeit mit passenden Inhalten zu erreichen und dabei Transparenz beim Datenschutz zu bewahren – das schafft langfristige Loyalität statt kurzfristige Conversions.[4]**\n\n**[WARUM RELEVANT]:**\n\nHier ist der Schmerzpunkt konkret: Viele KMUs haben E-Mail-Marketing abgeschrieben oder behandeln es als reines Transaktions-Instrument. Die Wahrheit: **E-Mail hat mit intelligenter KI-Segmentierung und automatisierten Sequenzen den höchsten ROI aller direkten Marketingkanäle.** \n\nDer Nutzwert für Agenturen und KMUs liegt in einer pragmatischen, umsetzbaren Strategie: Nicht „mehr Kanäle\", sondern **smarter in existierenden Kanälen mit KI arbeiten**. Das spart Budgets, erhöht Effizienz und liefert nachweisbare Ergebnisse – genau das, das KMUs aktuell suchen, wenn der Wettbewerbsdruck steigt.[3][5]\n\n---\n\n## 3. 
Konträrer Trend: KI kann Vertrauen nicht ersetzen – und das ist die größte Chance\n\n**[TITEL]:** \"Gegen den Hype: Warum KI-Agenten ohne menschliche Beziehungen scheitern\"\n\n**[DER FAKT:]**\n\nDer konträre Trend findet sich im Kernwiderspruch der aktuellen B2B-Marketing-Diskussion: Während **vollautomatisierte Content-Pipelines und KI-gestützte Kommunikationsökosysteme als Zukunft gepriesen werden**, warnt der Bundesverband Industrie Kommunikation (bvik) explicit davor, dass **„ein vertrauensvolles Kundenverhältnis sich nicht vollständig durch KI-Algorithmen ersetzen lässt\".[3][5]** \n\nParallel dazu zeigen aktuelle Marketing-Trends 2026: **Marken, die Relevanz mit Vertrauen verbinden, schaffen nicht nur mehr Reichweite, sondern auch langfristige Beziehungen und echte Loyalität.[4]** Das bedeutet: KI ist ideal für Effizienz und Personalisierung – aber der echte Wettbewerbsvorteil liegt in der **strategischen Kombination aus KI-gestützter Automatisierung + menschlicher Glaubwürdigkeit und authentischer Kommunikation**.\n\n**[WARUM RELEVANT]:**\n\nDies ist das stärkste LinkedIn-Hook-Potenzial für KMUs und Agenturen: **Während 69% der Unternehmen in KI-Assistenten investieren, verlieren sie gleichzeitig den menschlichen Faktor – und damit Kundenvertrauen.** \n\nDer praktische Nutzwert ist enorm: Agenturen können sich positionieren als **„KI + Human\"-Strategie-Partner**, die KMUs helfen, nicht in die KI-Automatisierungsfalle zu tappen. Die Botschaft: KI für Skalierbarkeit, Menschen für Vertrauen – und diese Kombination ist nicht ersetzbar. Das spricht direkt die Angst vieler KMU-Entscheider an, die befürchten, durch KI-Automatisierung ihre Markenidentität zu verlieren.[3][5][4]" + }, + "delta": { + "role": "assistant", + "content": "" + }, + "finish_reason": "stop" + } + ] + } + ], + "Profil-Analyzer": [ + { + "output": [ + { + "id": "msg_0c7a615cd00070f000696e4427025081958740231ae7ff843d", + "type": "message", + "status": "completed", + "content": [ + { + "type": "output_text", + "annotations": [], + "logprobs": [], + "text": "{\n \"research_context\": {\n \"industry\": \"Kommunikationsberatung und Public Relations\",\n \"target_audience_detailed\": \"KMUs und Agenturen, die wachsen wollen; Unternehmen, die Kommunikation als strategischen Wachstumsfaktor sehen\",\n \"content_pillars\": [\n \"Kommunikation als Sales-Infrastruktur\",\n \"KI-Einsatz im Sales und Marketing\",\n \"LinkedIn-Kommunikation für Unternehmen\",\n \"Positionierung und Neuanfangskonzepte\",\n \"Persönliche Anekdoten und Erfolgsgeschichten\"\n ],\n \"expertise_level\": \"Experte\"\n },\n \"writing_dna\": {\n \"tonality_description\": \"Laut, ehrlich, begeisterungsfähig und nahbar\",\n \"perspective\": \"Ich-Perspektive\",\n \"form_of_address\": \"Duzen (Du/Euch)\",\n \"sentence_dynamics\": \"Mix aus kurzen, impulsiven Sätzen und längeren erklärenden Absätzen, reich an rhetorischen Fragen und Ausrufen\",\n \"length_and_pacing\": {\n \"expected_post_length\": \"Mittel (Storytelling)\",\n \"average_word_count\": \"350-500\",\n \"paragraph_cadence\": \"2-4 Sätze pro Absatz, dynamischer Lesefluss\"\n },\n \"hook_formula\": \"Frage oder provokative Aussage, oft gefolgt von persönlicher Anekdote\",\n \"cta_style\": \"Direkter Aufruf zum Mitmachen oder Reflektieren\",\n \"vocabulary_preferences\": [\n \"Halleluja\",\n \"Galopp\",\n \"Sorry to say\",\n \"kommunikative KI-Signale setzen\",\n \"Sales-Infrastruktur\"\n ]\n },\n \"linguistic_fingerprint\": {\n \"energy_level\": \"9\",\n \"rhetorical_shouting\": \"JA, Beispiele: 
JETZT, WEG, ALLES\",\n \"punctuation_style\": \"Exzessive Nutzung von Ausrufezeichen, Kombinationen wie :)!\",\n \"signature_phrases\": [\n \"Halleluja\",\n \"Galopp\",\n \"Sorry to say\"\n ],\n \"narrative_anchors\": [\n \"Flashbacks\",\n \"Persönliche Anekdoten\",\n \"Dialoge\"\n ]\n },\n \"visual_fingerprint\": {\n \"unicode_rules\": \"Fette Unicode-Schrift für wichtige Begriffe\",\n \"emoji_logic\": {\n \"specific_emojis\": [\n \"🚀\",\n \"💸\",\n \"🙃\",\n \"📸\",\n \"🤣\",\n \"💫\",\n \"🐎\",\n \"🫶\"\n ],\n \"placement\": \"Anfang, Ende und als Highlight im Text\",\n \"frequency\": \"3-4 Emojis pro 100 Wörter\"\n },\n \"formatting_layout\": \"Kurze Absätze, gelegentliche Nutzung von Trennlinien, Aufzählungspunkte.\"\n },\n \"guardrails_for_critic\": {\n \"forbidden_buzzwords\": [\n \"Innovativ\",\n \"Disruptiv\",\n \"Influencer\"\n ],\n \"style_no_gos\": [\n \"Zu viel Eigenlob\",\n \"Passiver Sprachstil\"\n ],\n \"topic_blacklist\": [\n \"KI-Suche als Sales-Infrastruktur\",\n \"Neuanfang und Mut im Jahr 2026\"\n ]\n }\n}" + } + ], + "role": "assistant" + } + ] + } + ], + "Wählt-Thema": [ + { + "output": [ + { + "id": "msg_008a45693a2d938e00696e4432551c81a1b1cbe7a88130ba7f", + "type": "message", + "status": "completed", + "content": [ + { + "type": "output_text", + "annotations": [], + "logprobs": [], + "text": "```json\n{\n \"selected_title\": \"News-Flash: KI-Agenten werden zur Standard-Ressource im B2B-Marketing\",\n \"selected_fact\": \"Eine aktuelle Studie des PRLab (2025) zeigt: 59 Prozent der PR-Fachleute betrachten KI als Top-Priorität – vor allem für Entwürfe und Trendanalysen. Parallel dazu hat der Bundesverband Industrie Kommunikation (bvik) in seiner neuen Trendbarometer-Studie 2026 identifiziert, dass 69% der befragten Unternehmen davon ausgehen, dass „künstliche Mitarbeiter\\\" (KI-Assistenten) das B2B-Marketing in den nächsten zwei Jahren standardmäßig als zusätzliche Ressource unterstützen werden. Während fast 60% der PR-Profis KI bereits einsetzen, mangelt es vielen Unternehmen noch an klaren Governance-Regeln und strategischen KI-Konzepten. KI-Agenten werden nicht als isolierte Tools genutzt, sondern entwickeln sich zu vernetzten Kommunikationsökosystemen, in denen Brand-, Compliance-, QA- und Product-Roadmap-Agenten zusammenarbeiten.\",\n \"reasoning\": \"Das Thema trifft den Kernschmerzpunkt von KMUs sowie Agenturen und zeigt eine massive Lücke zwischen der aktuellen KI-Nutzung und der strategischen Umsetzung, was eine enorme Relevanz und Dringlichkeit für die Zielgruppe schafft.\"\n}\n```" + } + ], + "role": "assistant" + } + ] + } + ], + "Writer": [ + { + "output": [ + { + "id": "msg_06492e607f6f19ed00696e4437c6d4819ca083958f2fe2c99a", + "type": "message", + "status": "completed", + "content": [ + { + "type": "output_text", + "annotations": [], + "logprobs": [], + "text": "𝗞𝗮𝗻𝗻𝘀𝘁 𝗗𝘂 𝗱𝗶𝗿 𝗲𝗶𝗻 𝗟𝗲𝗯𝗲𝗻 𝗼𝗵𝗻𝗲 𝗞𝗜-𝗔𝗴𝗲𝗻𝘁𝗲𝗻 𝗶𝗺 𝗠𝗮𝗿𝗸𝗲𝘁𝗶𝗻𝗴 𝗻𝗼𝗰𝗵 𝘃𝗼𝗿𝘀𝘁𝗲𝗹𝗹𝗲𝗻? 🚀\n\nFlashback zu 2025: Ich erinnere mich, wie ich in einem Meeting saß, als mich der Gedanke durchzuckte – KI im B2B-Marketing? Ja, bitte! Wir erkannten plötzlich: Über 59% der PRler nennen KI ihre Top-Priorität! Halleluja! 🙃\n\nAber Moment, wie sieht es mit den Unternehmensstrategien aus? Sorry to say, da gibt es noch Luft nach oben! Der Bundesverband Industrie Kommunikation (bvik) zeigt, dass 69% der Unternehmen glauben, „künstliche Mitarbeiter“ werden in den nächsten zwei Jahren zur STANDARD-Ressource. 🙌 Klingt nach Galopp in die richtige Richtung!\n\nLasst uns kurz strategisch werden. 
Unternehmen tun sich schwer mit klaren Governance-Regeln. Das bedeutet, die intelligente Kommunikation braucht immer noch ihre solide Sales-Infrastruktur! Worauf warten wir? JETZT ist die Zeit, kommunikative KI-Signale zu setzen.\n\nIch sags euch, KI-Agenten sind keine isolierten Tools mehr! Sie entfalten sich zu vernetzten Kommunikationsökosystemen. Sie arbeiten mit Brand- und QA-Agenten und bringen unsere Product-Strategien auf ein höheres Level. Kapiert ihr, was das heißt? Die Zukunft ist JETZT und sie ist voller Möglichkeiten! 💫\n\nSorry to say, für KMUs und Agenturen, die nicht am Ball bleiben – das könnte ein Abschied auf Raten sein. BITTE nicht nachlassen! Fragen wir uns: Wie kann unsere Kommunikation besser, schneller, effizienter werden? Der Schlüssel liegt darin, dass KI uns helfen sollte, nicht überholen!\n\nAlso, Hände hoch, wer jetzt bereit ist, die KI-Revolution zu gestalten? Seid mutig, seid laut – denn laut sind wir doch am liebsten, oder? 🤣\n\nTragt euch auf eure To-Do-Liste: Nutzt KI-Agenten als strategische Ressource und verändert die Spielregeln des Marketings. Galopp voran und der Erfolg wird euch nicht entkommen! ✨🐎\n\nLasst uns darüber sprechen. Was denkt ihr? Welche Rolle spielt KI bereits in eurem Marketing? Kommentiert und lasst uns gemeinsam wachsen! 🫶" + } + ], + "role": "assistant" + } + ] + } + ] + }, + "meta": { + "templateCredsSetupCompleted": true, + "instanceId": "ca0fb5f738ed236788b4b6b3930d71bac167c4dc944920461ec2e75b751beaa8" + } +} \ No newline at end of file