random qa
@@ -73,7 +73,6 @@ class BaseAction(ABC):
        """Initialize the action

        Args:
            action_name: name of the action
            action_data: action payload data
            reasoning: rationale for executing this action
            cycle_timers: dictionary of cycle timers
@@ -118,13 +118,10 @@ class ReplyAction(BaseAction):

        reply_to = reply_data.get("reply_to", "none")

        # sender = ""
        target = ""
        if ":" in reply_to or "：" in reply_to:
            # Use a regex that matches either the ASCII or the full-width (Chinese) colon
            parts = re.split(pattern=r"[:：]", string=reply_to, maxsplit=1)
            if len(parts) == 2:
                # sender = parts[0].strip()
                target = parts[1].strip()
            anchor_message = chatting_observation.search_message_by_text(target)
        else:
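A quick way to see what the [:：] split in this hunk does (a minimal standalone sketch; the sample strings are invented):

    import re

    # Both the ASCII colon and the full-width Chinese colon separate sender from target.
    for reply_to in ["Alice: hello there", "小明：你好"]:
        parts = re.split(pattern=r"[:：]", string=reply_to, maxsplit=1)
        if len(parts) == 2:
            print(parts[1].strip())  # -> "hello there", then "你好"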
@@ -32,10 +32,7 @@ class BaseCommand(ABC):
        """
        self.message = message
        self.matched_groups: Dict[str, str] = {}  # named groups captured by the regex match
        self._services = {}  # internal services

        # Set up services
        self._services["chat_stream"] = message.chat_stream
        self._services = {"chat_stream": message.chat_stream}  # internal services

        # Log prefix
        self.log_prefix = f"[Command:{self.command_name}]"
@@ -181,11 +181,6 @@ class DefaultExpressor:
        (the functionality of the original HeartFCGenerator has been merged in)
        """
        try:
            # 1. Get the mood influence factor and adjust the model temperature
            # arousal_multiplier = mood_manager.get_arousal_multiplier()
            # current_temp = float(global_config.model.normal["temp"]) * arousal_multiplier
            # self.express_model.params["temperature"] = current_temp  # adjust temperature dynamically

            # --- Determine sender_name for private chat ---
            sender_name_for_prompt = "某人"  # Default for group or if info unavailable
            if not self.is_group_chat and self.chat_target_info:
@@ -48,11 +48,13 @@ class ToolProcessor(BaseProcessor):
        self.structured_info = []

    async def process_info(
        self, observations: Optional[List[Observation]] = None, running_memorys: Optional[List[Dict]] = None, *infos
    ) -> List[dict]:
        self, observations: Optional[List[Observation]] = None, running_memories: Optional[List[Dict]] = None, *infos
    ) -> List[StructuredInfo]:
        """Process information objects

        Args:
            observations: optional list of observations, containing ChattingObservation and StructureObservation instances
            running_memories: optional list of runtime memories, given as dicts
            *infos: variable number of InfoBase information objects

        Returns:
@@ -60,15 +62,15 @@ class ToolProcessor(BaseProcessor):
        """

        working_infos = []
        result = []

        if observations:
            for observation in observations:
                if isinstance(observation, ChattingObservation):
                    result, used_tools, prompt = await self.execute_tools(observation, running_memorys)
                    result, used_tools, prompt = await self.execute_tools(observation, running_memories)

                    # Update the structured info in the WorkingObservation
                    logger.debug(f"工具调用结果: {result}")

            # Update the structured info in the WorkingObservation
            for observation in observations:
                if isinstance(observation, StructureObservation):
                    for structured_info in result:
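Under the renamed parameter, a hypothetical call into ToolProcessor might look like the sketch below (processor, chatting_obs and the memory dict are placeholder names, not taken from the commit):

    # Hypothetical call site, inside an async function:
    async def run_tools(processor, chatting_obs):
        results = await processor.process_info(
            observations=[chatting_obs],
            running_memories=[{"topic": "weather", "content": "user asked about rain"}],
        )
        return results  # per the new annotation, List[StructuredInfo] rather than List[dict]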
@@ -81,12 +83,7 @@ class ToolProcessor(BaseProcessor):
        structured_info = StructuredInfo()
        if working_infos:
            for working_info in working_infos:
                # print(f"working_info: {working_info}")
                # print(f"working_info.get('type'): {working_info.get('type')}")
                # print(f"working_info.get('content'): {working_info.get('content')}")
                structured_info.set_info(key=working_info.get("type"), value=working_info.get("content"))
                # info = structured_info.get_processed_info()
                # print(f"info: {info}")

        return [structured_info]
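The aggregation loop above assumes each entry in working_infos is a dict with "type" and "content" keys; an illustrative shape (values invented) is:

    working_infos = [
        {"type": "weather_tool", "content": "Sunny, 25°C"},
        {"type": "search_tool", "content": "Top result: ..."},
    ]
    # Each dict becomes one key/value pair on a single StructuredInfo via set_info(),
    # and process_info returns that one object wrapped in a list.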
@@ -198,9 +198,7 @@ class ActionModifier:

        Args:
            actions_with_info: dict of actions with their full information
            observed_messages_str: observed chat messages
            chat_context: chat context information
            extra_context: extra context information
            chat_content: chat content

        Returns:
            Dict[str, Any]: dict of actions activated after filtering
@@ -320,9 +318,7 @@ class ActionModifier:

        Args:
            llm_judge_actions: actions that require an LLM judgement
            observed_messages_str: observed chat messages
            chat_context: chat context
            extra_context: extra context
            chat_content: chat content

        Returns:
            Dict[str, bool]: mapping from action name to activation result
@@ -217,7 +217,6 @@ class ActionPlanner(BasePlanner):

            # Extract the decision, supplying defaults
            extracted_action = parsed_json.get("action", "no_reply")
            # extracted_reasoning = parsed_json.get("reasoning", "LLM未提供理由")
            extracted_reasoning = ""

            # Add all remaining attributes to action_data
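Using parsed_json.get() with a default keeps the planner from raising KeyError when the LLM omits a field; a minimal standalone sketch (the JSON string and the action_data expression are illustrative, not from the commit):

    import json

    raw = '{"action": "reply", "emoji": "smile"}'
    parsed_json = json.loads(raw)
    extracted_action = parsed_json.get("action", "no_reply")  # falls back to "no_reply" if missing
    # One way to collect "all remaining attributes" into action_data:
    action_data = {k: v for k, v in parsed_json.items() if k not in ("action", "reasoning")}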
@@ -132,9 +132,6 @@ global_config = dict(
    }
)

# _load_config(global_config, parser.parse_args().config_path)
# file_path = os.path.abspath(__file__)
# dir_path = os.path.dirname(file_path)
ROOT_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", ".."))
config_path = os.path.join(ROOT_PATH, "config", "lpmm_config.toml")
_load_config(global_config, config_path)
@@ -25,10 +25,10 @@ def load_raw_data(path: str = None) -> tuple[list[str], list[str]]:
            import_json = json.loads(f.read())
    else:
        raise Exception(f"原始数据文件读取失败: {json_path}")
    # Example of import_json contents:
    # import_json = [
    #     "The capital of China is Beijing. The capital of France is Paris.",
    # ]
    """
    Example of import_json contents:
    import_json = ["The capital of China is Beijing. The capital of France is Paris.",]
    """
    raw_data = []
    sha256_list = []
    sha256_set = set()
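sha256_list and sha256_set point to content-hash deduplication of the imported paragraphs; a sketch of how that step presumably proceeds (the loop body is an assumption, it is not shown in the diff):

    import hashlib

    for paragraph in import_json:
        digest = hashlib.sha256(paragraph.encode("utf-8")).hexdigest()
        if digest in sha256_set:
            continue  # skip paragraphs already seen
        sha256_set.add(digest)
        sha256_list.append(digest)
        raw_data.append(paragraph)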
@@ -112,7 +112,7 @@ class MessageRecv(Message):
        self.detailed_plain_text = message_dict.get("detailed_plain_text", "")  # initialized to an empty string
        self.is_emoji = False

    def update_chat_stream(self, chat_stream: "ChatStream"):
    def update_chat_stream(self, chat_stream: ChatStream):
        self.chat_stream = chat_stream

    async def process(self) -> None:
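Dropping the quotes around ChatStream only works when the class is imported at runtime; the quoted form is the usual way to keep the import type-checking-only. A stripped-down illustration (the import path is invented):

    from typing import TYPE_CHECKING

    if TYPE_CHECKING:
        from chat_stream import ChatStream  # illustrative import path

    class MessageRecv:
        # The quoted annotation is resolved lazily, so ChatStream need not exist at runtime.
        def update_chat_stream(self, chat_stream: "ChatStream"):
            self.chat_stream = chat_stream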
@@ -585,14 +585,9 @@ async def build_anonymous_messages(messages: List[Dict[str, Any]]) -> str:

    for msg in messages:
        try:
            # user_info = msg.get("user_info", {})
            platform = msg.get("chat_info_platform")
            user_id = msg.get("user_id")
            _timestamp = msg.get("time")
            # print(f"msg:{msg}")
            # print(f"platform:{platform}")
            # print(f"user_id:{user_id}")
            # print(f"timestamp:{timestamp}")
            if msg.get("display_message"):
                content = msg.get("display_message")
            else:
@@ -247,8 +247,6 @@ def split_into_sentences_w_remove_punctuation(text: str) -> list[str]:

    # If splitting produced nothing (e.g. the input was all separators and did not meet the keep conditions), recover kaomoji and return
    if not segments:
        # recovered_text = recover_kaomoji([text], mapping)  # recover kaomoji in the original text - moved to the caller
        # return [s for s in recovered_text if s]  # return non-empty results
        return [text] if text else []  # if the original text is non-empty, return it (it may contain only unsplit characters or kaomoji placeholders)

    # 2. Probabilistic merging
@@ -336,7 +334,6 @@ def process_llm_response(text: str) -> list[str]:
    kaomoji_mapping = {}
    # Extract content wrapped in （）, [] or () that contains Chinese characters
    pattern = re.compile(r"[(\[（](?=.*[一-鿿]).*?[)\]）]")
    # _extracted_contents = pattern.findall(text)
    _extracted_contents = pattern.findall(protected_text)  # search on the protected text
    # Remove （）/[]/() together with the content they wrap
    cleaned_text = pattern.sub("", protected_text)
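Note that the lookahead (?=.*[一-鿿]) only requires a CJK character somewhere later on the same line, which is why the pattern runs on protected_text, i.e. after kaomoji have been swapped out for placeholders. A small standalone check of the removal step (sample text invented):

    import re

    # Full-width or half-width brackets followed by Chinese text on the same line are stripped.
    pattern = re.compile(r"[(\[（](?=.*[一-鿿]).*?[)\]）]")
    protected_text = "好的（笑）我知道了[点头]"
    print(pattern.sub("", protected_text))  # -> "好的我知道了"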