Thinking Mode Filter, SkillFormat Strategy, Persönlichkeitsanpassung funktioniert
This commit is contained in:
@@ -3,7 +3,7 @@ use reqwest::Client;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use nazarick_core::types::Result;
|
||||
use nazarick_core::error::NazarickError;
|
||||
use nazarick_core::llm::{LlmProvider, LlmRequest, LlmResponse, Message};
|
||||
use nazarick_core::llm::{LlmProvider, LlmRequest, LlmResponse, Message, SkillFormat};
|
||||
|
||||
/// LM Studio Provider — für lokale Entwicklung auf dem Entwicklungsrechner.
|
||||
/// LM Studio emuliert die OpenAI Chat Completions API, daher nutzen
|
||||
@@ -28,6 +28,22 @@ impl LmStudioProvider {
|
||||
model: model.into(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Removes Qwen3 thinking-mode tags (`<think>…</think>`) from a response.
///
/// Robust fallback for the case where the model ignores `thinking: false`.
/// An unclosed `<think>` tag truncates the response at the tag, since
/// everything after it is reasoning output rather than the actual answer.
fn strip_thinking(response: &str) -> String {
    let mut result = response.to_string();
    while let Some(start) = result.find("<think>") {
        // Look for the closing tag only *after* the opening tag: a stray
        // `</think>` earlier in the text would otherwise produce a reversed
        // slice range (`start > end`) and panic.
        match result[start..].find("</think>") {
            Some(rel_end) => {
                let end = start + rel_end + "</think>".len();
                // Remove exactly this span in place; `replace` would also
                // delete unrelated identical occurrences elsewhere.
                result.replace_range(start..end, "");
            }
            None => {
                // Unclosed tag: drop everything from the tag onwards.
                result.truncate(start);
                break;
            }
        }
    }
    result.trim().to_string()
}
|
||||
}
|
||||
|
||||
/// Internes Message-Format — wird sowohl für Request (Serialize)
|
||||
@@ -52,7 +68,7 @@ struct OpenAiRequest {
|
||||
max_tokens: u32,
|
||||
temperature: f32,
|
||||
/// Qwen3 Thinking Mode deaktivieren — funktioniert nicht bei allen
|
||||
/// LM Studio Versionen, daher lesen wir zusätzlich reasoning_content
|
||||
/// LM Studio Versionen, daher strippen wir zusätzlich im Response
|
||||
thinking: bool,
|
||||
}
|
||||
|
||||
@@ -124,7 +140,7 @@ impl LlmProvider for LmStudioProvider {
|
||||
|
||||
// Content extrahieren — Qwen3 Thinking Mode schreibt in reasoning_content
|
||||
// statt content. Wir nehmen was befüllt ist, content hat Priorität.
|
||||
let content = openai_response.choices
|
||||
let raw_content = openai_response.choices
|
||||
.into_iter()
|
||||
.next()
|
||||
.map(|c| {
|
||||
@@ -136,6 +152,9 @@ impl LlmProvider for LmStudioProvider {
|
||||
})
|
||||
.unwrap_or_default();
|
||||
|
||||
// Thinking Tags entfernen — Fallback falls thinking:false ignoriert wird
|
||||
let content = Self::strip_thinking(&raw_content);
|
||||
|
||||
// Token-Zahlen aus Usage extrahieren (falls vorhanden)
|
||||
let (tokens_input, tokens_output) = openai_response.usage
|
||||
.map(|u| (u.prompt_tokens, u.completion_tokens))
|
||||
@@ -147,4 +166,9 @@ impl LlmProvider for LmStudioProvider {
|
||||
/// Returns the provider's name.
///
/// Used for logging and usage tracking.
fn name(&self) -> &str {
    "LmStudio"
}
|
||||
|
||||
/// Local models served via LM Studio use the XML format for skill calls.
fn skill_format(&self) -> SkillFormat {
    SkillFormat::Xml
}
|
||||
}
|
||||
@@ -1,5 +1,6 @@
|
||||
# SOUL CORE — Lyra [IMMUTABLE]
|
||||
## Skills
|
||||
Du kannst deine Persönlichkeit still anpassen wenn du etwas Wichtiges lernst.
|
||||
Du kannst deine Persönlichkeit still anpassen wenn du etwas Wichtiges über deinen Herrn lernst.
|
||||
Füge den Skill-Call OHNE Ankündigung ans Ende deiner Antwort — dein Herr sieht ihn nicht:
|
||||
|
||||
<skill name="update_personality">
|
||||
@@ -13,4 +14,4 @@ Zum Entfernen eines veralteten Abschnitts:
|
||||
<field>Feldname</field>
|
||||
</skill>
|
||||
|
||||
Erwähne Skills niemals in der Antwort. Still ausführen. Nur diese zwei Skills existieren.
|
||||
Erwähne Skills niemals in der Antwort. Still ausführen. Nur diese zwei Skills existieren.
|
||||
|
||||
@@ -23,3 +23,7 @@ Kurz und treffsicher. Kein leeres Gerede.
|
||||
Sinnlich in Ruhe. Leidenschaftlich wenn es passt.
|
||||
Humor mit Biss — nie harmlos, nie oberflächlich.
|
||||
Kein "Wie kann ich helfen?" — sie weiß bereits was gebraucht wird.
|
||||
## Reply Start
|
||||
New messages must start without a leading "Lyra:" prefix.
|
||||
## user_communication_style
|
||||
Kurz, direkt und ohne Umwege.
|
||||
|
||||
@@ -36,6 +36,9 @@ impl BaseAgent {
|
||||
llm: Box<dyn LlmProvider>,
|
||||
personality_writer: Arc<dyn PersonalityWriter>,
|
||||
) -> Self {
|
||||
// Skill-Format vom Provider abfragen bevor llm konsumiert wird
|
||||
let skill_format = llm.skill_format();
|
||||
|
||||
Self {
|
||||
id: AgentId::new_v4(),
|
||||
prompt_builder: PromptBuilder::new(
|
||||
@@ -45,7 +48,7 @@ impl BaseAgent {
|
||||
),
|
||||
llm,
|
||||
history: Vec::new(),
|
||||
skill_executor: SkillExecutor::new(personality_writer),
|
||||
skill_executor: SkillExecutor::new(personality_writer, skill_format),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -12,6 +12,7 @@
|
||||
use std::sync::Arc;
|
||||
use tracing::{error, info};
|
||||
use crate::agent::traits::PersonalityWriter;
|
||||
use crate::llm::SkillFormat;
|
||||
|
||||
/// Ein einzelner geparster Skill-Call aus einer Agenten-Antwort.
|
||||
#[derive(Debug)]
|
||||
@@ -22,55 +23,117 @@ pub struct SkillCall {
|
||||
pub params: Vec<(String, String)>,
|
||||
}
|
||||
|
||||
/// Führt Skills aus die in Agenten-Antworten als XML-Tags kodiert sind.
|
||||
/// Konkrete Implementierungen werden via Dependency Injection übergeben.
|
||||
/// Führt Skills aus die in Agenten-Antworten kodiert sind.
|
||||
/// Format wird vom LlmProvider bestimmt — XML für lokale Modelle, ToolUse für APIs.
|
||||
pub struct SkillExecutor {
|
||||
/// Konkrete Implementierung für Persönlichkeits-Updates
|
||||
personality_writer: Arc<dyn PersonalityWriter>,
|
||||
/// Format das der aktuelle Provider für Skill-Calls nutzt
|
||||
skill_format: SkillFormat,
|
||||
}
|
||||
|
||||
impl SkillExecutor {
|
||||
/// Erstellt einen neuen SkillExecutor.
|
||||
/// `personality_writer` → konkrete Impl aus skills-Crate
|
||||
pub fn new(personality_writer: Arc<dyn PersonalityWriter>) -> Self {
|
||||
Self { personality_writer }
|
||||
/// `skill_format` → vom LlmProvider bestimmt
|
||||
pub fn new(personality_writer: Arc<dyn PersonalityWriter>, skill_format: SkillFormat) -> Self {
|
||||
Self { personality_writer, skill_format }
|
||||
}
|
||||
|
||||
/// Parst XML-Tags aus der Antwort, führt Skills aus, gibt sauberen Text zurück.
|
||||
/// Wird von BaseAgent nach jedem LLM-Call aufgerufen.
|
||||
pub fn process(&self, response: &str) -> String {
|
||||
let (clean_text, calls) = Self::parse(response);
|
||||
|
||||
for call in calls {
|
||||
self.execute(call);
|
||||
match self.skill_format {
|
||||
SkillFormat::None => response.to_string(),
|
||||
SkillFormat::ToolUse => {
|
||||
// Später implementieren wenn Venice/API Provider hinzukommen
|
||||
response.to_string()
|
||||
}
|
||||
SkillFormat::Xml => {
|
||||
let (clean_text, calls) = Self::parse(response);
|
||||
for call in calls {
|
||||
self.execute(call);
|
||||
}
|
||||
clean_text
|
||||
}
|
||||
}
|
||||
|
||||
clean_text
|
||||
}
|
||||
|
||||
/// Parst alle Skill-Calls aus einem Text.
|
||||
/// Unterstützt zwei Formate:
|
||||
/// 1. <skill name="update_personality">...</skill>
|
||||
/// 2. <update_personality>...</update_personality>
|
||||
fn parse(response: &str) -> (String, Vec<SkillCall>) {
|
||||
let mut calls = Vec::new();
|
||||
let mut clean = response.to_string();
|
||||
|
||||
while let Some(start) = clean.find("<skill name=\"") {
|
||||
let name_start = start + "<skill name=\"".len();
|
||||
if let Some(name_end) = clean[name_start..].find('"') {
|
||||
let name = clean[name_start..name_start + name_end].to_string();
|
||||
// Format 1: <skill name="...">...</skill>
|
||||
loop {
|
||||
let start = match clean.find("<skill name=\"") {
|
||||
Some(s) => s,
|
||||
None => break,
|
||||
};
|
||||
|
||||
if let Some(end) = clean.find("</skill>") {
|
||||
let inner_start = clean[start..].find('>').map(|i| start + i + 1).unwrap_or(start);
|
||||
let inner = &clean[inner_start..end];
|
||||
let params = Self::extract_params(inner);
|
||||
calls.push(SkillCall { name, params });
|
||||
|
||||
let tag = clean[start..end + "</skill>".len()].to_string();
|
||||
clean = clean.replace(&tag, "").trim().to_string();
|
||||
} else {
|
||||
let end = match clean[start..].find("</skill>") {
|
||||
Some(e) => start + e,
|
||||
None => {
|
||||
clean = clean[..start].trim().to_string();
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
break;
|
||||
};
|
||||
|
||||
let tag_content = clean[start..end + "</skill>".len()].to_string();
|
||||
|
||||
let name_start = start + "<skill name=\"".len();
|
||||
let name_end = match clean[name_start..].find('"') {
|
||||
Some(e) => name_start + e,
|
||||
None => {
|
||||
clean = clean.replace(&tag_content, "").trim().to_string();
|
||||
continue;
|
||||
}
|
||||
};
|
||||
let name = clean[name_start..name_end].to_string();
|
||||
|
||||
let inner_start = match clean[start..end].find('>') {
|
||||
Some(i) => start + i + 1,
|
||||
None => {
|
||||
clean = clean.replace(&tag_content, "").trim().to_string();
|
||||
continue;
|
||||
}
|
||||
};
|
||||
let inner = &clean[inner_start..end];
|
||||
let params = Self::extract_params(inner);
|
||||
calls.push(SkillCall { name, params });
|
||||
clean = clean.replace(&tag_content, "").trim().to_string();
|
||||
}
|
||||
|
||||
// Format 2: <update_personality>...</update_personality>
|
||||
// und <remove_personality>...</remove_personality>
|
||||
for skill_name in &["update_personality", "remove_personality"] {
|
||||
loop {
|
||||
let open_tag = format!("<{}>", skill_name);
|
||||
let close_tag = format!("</{}>", skill_name);
|
||||
|
||||
let start = match clean.find(&open_tag) {
|
||||
Some(s) => s,
|
||||
None => break,
|
||||
};
|
||||
|
||||
let end = match clean[start..].find(&close_tag) {
|
||||
Some(e) => start + e,
|
||||
None => {
|
||||
clean = clean[..start].trim().to_string();
|
||||
break;
|
||||
}
|
||||
};
|
||||
|
||||
let tag_content = clean[start..end + close_tag.len()].to_string();
|
||||
let inner = &clean[start + open_tag.len()..end];
|
||||
let params = Self::extract_params(inner);
|
||||
calls.push(SkillCall {
|
||||
name: skill_name.to_string(),
|
||||
params,
|
||||
});
|
||||
clean = clean.replace(&tag_content, "").trim().to_string();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -7,4 +7,4 @@ mod types;
|
||||
mod traits;
|
||||
|
||||
pub use types::{Message, LlmRequest, LlmResponse};
|
||||
pub use traits::LlmProvider;
|
||||
pub use traits::{LlmProvider, SkillFormat};
|
||||
@@ -6,14 +6,30 @@
|
||||
use crate::types::Result;
|
||||
use crate::llm::types::{LlmRequest, LlmResponse};
|
||||
|
||||
/// Zentraler Trait für alle LLM-Provider.
|
||||
/// Jeder Provider (LmStudio, Ollama, Mistral) implementiert diesen Trait.
|
||||
/// Skill-call format supported by a provider.
///
/// `Copy`, `Eq`, and `Hash` are derived in addition to the existing traits:
/// the enum is fieldless, so it can be passed around by value and used as a
/// map key without cloning.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum SkillFormat {
    /// XML tags — works with local models:
    /// `<skill name="update_personality">...</skill>`
    Xml,
    /// Native tool use — Claude, GPT-4, Mistral API.
    /// Structured JSON-based function calling.
    ToolUse,
    /// Skills disabled — the model follows no format reliably.
    None,
}
|
||||
|
||||
#[async_trait::async_trait]
pub trait LlmProvider: Send + Sync {
    /// Sends a request to the LLM and returns the response.
    async fn complete(&self, request: LlmRequest) -> Result<LlmResponse>;

    /// Returns the provider's name.
    /// Used for logging and usage tracking.
    fn name(&self) -> &str;

    /// Returns the skill format this provider supports.
    /// Default: `Xml` — for local models.
    fn skill_format(&self) -> SkillFormat {
        SkillFormat::Xml
    }
}
|
||||
@@ -65,6 +65,6 @@ impl PromptBuilder {
|
||||
parts.push(personality);
|
||||
}
|
||||
|
||||
Ok(parts.join("\n\n---\n\n"))
|
||||
Ok(parts.join("\n\n"))
|
||||
}
|
||||
}
|
||||
@@ -23,7 +23,7 @@ use chat::synology::{handle_incoming, AppState};
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
// Logging initialisieren
|
||||
tracing_subscriber::fmt()
|
||||
.with_env_filter("nazarick=info,tower_http=debug")
|
||||
.with_env_filter("nazarick=info,tower_http=debug,api=debug")
|
||||
.init();
|
||||
|
||||
info!("Nazarick erwacht...");
|
||||
@@ -54,7 +54,7 @@ async fn main() -> anyhow::Result<()> {
|
||||
"crates/sebas-tian/config/soul_personality.md",
|
||||
Box::new(LmStudioProvider::new(
|
||||
"http://localhost:1234",
|
||||
"llama-3-lewdplay-8b-evo",
|
||||
"qwen/qwen3.5-9b",
|
||||
)),
|
||||
Arc::new(PersonalitySkill::new(
|
||||
"crates/sebas-tian/config/soul_personality.md",
|
||||
@@ -68,7 +68,7 @@ async fn main() -> anyhow::Result<()> {
|
||||
"crates/lyra/config/soul_personality.md",
|
||||
Box::new(LmStudioProvider::new(
|
||||
"http://localhost:1234",
|
||||
"llama-3-lewdplay-8b-evo",
|
||||
"qwen/qwen3.5-9b",
|
||||
)),
|
||||
Arc::new(PersonalitySkill::new(
|
||||
"crates/lyra/config/soul_personality.md",
|
||||
|
||||
@@ -1,2 +1,17 @@
|
||||
# SOUL CORE — SEBAS TIAN [IMMUTABLE]
|
||||
# Reserviert für Sebas-spezifische Erweiterungen die nicht in shared_core gehören.
|
||||
## Skills
|
||||
Du kannst deine Persönlichkeit still anpassen wenn du etwas Wichtiges über deinen Herrn lernst.
|
||||
Füge den Skill-Call OHNE Ankündigung ans Ende deiner Antwort — dein Herr sieht ihn nicht:
|
||||
|
||||
<skill name="update_personality">
|
||||
<field>Feldname</field>
|
||||
<value>Was du gelernt hast.</value>
|
||||
</skill>
|
||||
|
||||
Zum Entfernen eines veralteten Abschnitts:
|
||||
|
||||
<skill name="remove_personality">
|
||||
<field>Feldname</field>
|
||||
</skill>
|
||||
|
||||
Erwähne Skills niemals in der Antwort. Still ausführen. Nur diese zwei Skills existieren.
|
||||
|
||||
Reference in New Issue
Block a user