Quellcode durchsuchen

Merge branch 'master' of http://git.yangzhiqiang.tech/wushan/ai_learning into ch

iTTsShuu vor 1 Monat
Ursprung
Commit
79936fb52e
40 geänderte Dateien mit 4061 neuen und 0 gelöschten Zeilen
  1. 6 0
      pyproject.toml
  2. 110 0
      卢炳圻/agno/test3.py
  3. 268 0
      卢炳圻/test1.ipynb
  4. 42 0
      卢炳圻/test2/test2-1.py
  5. 328 0
      林兆新/2/chat_with_agent_intructions.py
  6. 1007 0
      林兆新/2/sse_app.py
  7. 160 0
      黄靖淏/agno/code/agno_agent_level2_3_test.ipynb
  8. 149 0
      黄靖淏/agno/code/agno_agrnt_level1_test.ipynb
  9. BIN
      黄靖淏/agno/code/tmp/bank_memory.db
  10. BIN
      黄靖淏/agno/code/tmp/memory.db
  11. BIN
      黄靖淏/agno/code/tmp/persistent_memory.db
  12. 304 0
      黄靖淏/agno/code/transfer.ipynb
  13. 107 0
      黄靖淏/other/code/adjust.ipynb
  14. 25 0
      黄靖淏/other/code/data_negative.jsonl
  15. 207 0
      黄靖淏/other/code/qwen3_lora_output/checkpoint-100/README.md
  16. 36 0
      黄靖淏/other/code/qwen3_lora_output/checkpoint-100/adapter_config.json
  17. BIN
      黄靖淏/other/code/qwen3_lora_output/checkpoint-100/adapter_model.safetensors
  18. BIN
      黄靖淏/other/code/qwen3_lora_output/checkpoint-100/optimizer.pt
  19. BIN
      黄靖淏/other/code/qwen3_lora_output/checkpoint-100/rng_state.pth
  20. BIN
      黄靖淏/other/code/qwen3_lora_output/checkpoint-100/scaler.pt
  21. BIN
      黄靖淏/other/code/qwen3_lora_output/checkpoint-100/scheduler.pt
  22. 104 0
      黄靖淏/other/code/qwen3_lora_output/checkpoint-100/trainer_state.json
  23. BIN
      黄靖淏/other/code/qwen3_lora_output/checkpoint-100/training_args.bin
  24. 207 0
      黄靖淏/other/code/qwen3_lora_output/checkpoint-130/README.md
  25. 36 0
      黄靖淏/other/code/qwen3_lora_output/checkpoint-130/adapter_config.json
  26. BIN
      黄靖淏/other/code/qwen3_lora_output/checkpoint-130/adapter_model.safetensors
  27. BIN
      黄靖淏/other/code/qwen3_lora_output/checkpoint-130/optimizer.pt
  28. BIN
      黄靖淏/other/code/qwen3_lora_output/checkpoint-130/rng_state.pth
  29. BIN
      黄靖淏/other/code/qwen3_lora_output/checkpoint-130/scaler.pt
  30. BIN
      黄靖淏/other/code/qwen3_lora_output/checkpoint-130/scheduler.pt
  31. 125 0
      黄靖淏/other/code/qwen3_lora_output/checkpoint-130/trainer_state.json
  32. BIN
      黄靖淏/other/code/qwen3_lora_output/checkpoint-130/training_args.bin
  33. 20 0
      黄靖淏/other/code/run_qwen3.py
  34. 0 0
      黄靖淏/other/code/show_m.ipynb
  35. 153 0
      黄靖淏/other/code/test_all.ipynb
  36. 160 0
      黄靖淏/t1/code/base_text.ipynb
  37. 215 0
      黄靖淏/t1/code/function_calling.ipynb
  38. 148 0
      黄靖淏/t1/code/function_calling_test.ipynb
  39. 144 0
      黄靖淏/t1/code/structured_output_test.ipynb
  40. 0 0
      黄靖淏/t2/code/t2.ipynb

+ 6 - 0
pyproject.toml

@@ -5,10 +5,16 @@ description = "Add your description here"
 readme = "README.md"
 requires-python = ">=3.11"
 dependencies = [
+    "agno>=1.7.1",
     "agno>=1.7.2",
     "dotenv>=0.9.9",
     "ipywidgets>=8.1.7",
     "openai>=1.93.0",
+    "duckduckgo-search>=6.0.0",
+    "pandas",
+    "yfinance>=0.2.65",
+    "ipywidgets",
+    "sqlalchemy",
     "pandas>=2.3.1",
 ]
 

+ 110 - 0
卢炳圻/agno/test3.py

@@ -0,0 +1,110 @@
+
+import asyncio
+from agno.storage.sqlite import SqliteStorage
+from agno.utils.log import logger
+import httpx
+from dotenv import load_dotenv
+from agno.agent import Agent
+from agno.tools import tool
+from typing import Any, Callable, Dict
+from agno.models.openai import OpenAIChat,OpenAILike
+from agno.tools import FunctionCall
+from rich.console import Console
+from rich.pretty import pprint
+from rich.prompt import Prompt
+from agno.exceptions import RetryAgentRun,StopAgentRun,AgentRunException
+from agno.tools.baidusearch import BaiduSearchTools
+from agno.tools import Toolkit
+from agno.memory.v2.memory import Memory
+from agno.memory.v2.db.sqlite import SqliteMemoryDb
+import os
+
+load_dotenv()
+
+# 读取环境变量
+model = OpenAILike(
+    id="qwen3-32b",
+    api_key=os.getenv("BAILIAN_API_KEY"),
+    base_url=os.getenv("BAILIAN_API_BASE_URL"),
+    request_params={"extra_body": {"enable_thinking": False}},
+)
+
+# 1. 查询联系人
+@tool(
+    name="get_contact",
+    description="查询用户是否存在,并返回手机号",
+)
+def get_contact(user_name: str):
+    # 只有“张三”存在
+    if user_name == "张三":
+        return {"exists": True, "phone": "13800001111", "message": "用户张三存在,手机号13800001111"}
+    else:
+        return {"exists": False, "phone": "", "message": f"用户{user_name}不存在,请重新输入正确的姓名"}
+
+# 2. 查询余额
+@tool(
+    name="get_balance",
+    description="查询当前账户余额",
+)
+def get_balance(card_number: str):
+    return {"balance": 500.0, "message": "当前余额为500元"}
+
+# 3. 转账
+@tool(
+    name="transfer",
+    description="向指定用户手机号转账",  
+)
+def transfer(user_name: str, phone: str, amount: float,card_number: str):
+    if user_name != "张三" or phone != "13800001111" or card_number is None:
+        return {"success": False, "message": "收款人信息有误,请重新确认姓名,手机号和卡号"}
+    if amount > 500:
+        return {"success": False, "message": "余额不足,最多只能转500元"}
+    return {"success": True, "message": f"成功向{user_name}({phone})转账{amount}元"}
+
+# 4. 回复用户(澄清/最终回复)
+@tool(
+    name="reply_to_user",
+    description="向用户澄清或回复最终结果",
+)
+def reply_to_user(message: str):
+    print(f"系统回复用户:{message}")
+    return {"ok": True}
+
+# memory = Memory(db=SqliteMemoryDb(table_name="user_memories", db_file="D:/pythonai/ai_learning/agno_cache/memory.db"))
+
+agent = Agent(
+    model=model,
+    tools=[get_contact, get_balance, transfer, reply_to_user],
+     instructions=[
+        "你是一个银行转账助手,负责协助用户完成转账操作。首先你需要收集用户的信息",
+        "请严格按照顺序提问:先问对方姓名,再问对方卡号,再问金额。",
+        "1. 先用 get_contact 查询对方是否存在",
+        "2. 用 get_balance 查询余额,余额固定500元。",
+        "3. 信息齐全后,用 transfer 工具发起转账。",
+        "4. 如果余额不足,提示用户最多只能转500元,并让用户重新输入金额。",
+        "5. 如果收款人不存在或手机号不对,提示用户重新输入。",
+        "6. 每次和用户澄清、最终结果,都用 reply_to_user 工具回复。",
+        "7. 只有所有信息都正确且余额充足时,才提示转账成功。",
+        "8. 你必须用用户最新提供的信息调用工具。",
+        "9. 请使用中文提问或回答"
+    ],
+    # memory=memory,
+    # enable_agentic_memory=True,
+    storage=SqliteStorage(table_name="agent_sessions", db_file="D:/pythonai/ai_learning/agno_cache/data.db"),
+    session_id="my_session",  # 固定session_id
+    add_history_to_messages=True,#是否自动把历史对话消息(chat history)添加到每次发送给大模型的 prompt 里。
+    num_history_runs=20,#这里设置为 20,表示每次和模型对话时,会把最近的 20 轮(你和 Agent 的来回)历史消息一并发给模型
+    markdown=True,
+)
+user_id = "user1"
+def main():
+    print("欢迎使用转账助手!输入 exit 退出。")
+    while True:
+        user_input = input("你:")
+        if user_input.strip().lower() in ["exit", "quit"]:
+            print("再见!")
+            break
+        agent.print_response(user_input, user_id=user_id)
+
+if __name__ == "__main__":
+    main()

Datei-Diff unterdrückt, da er zu groß ist
+ 268 - 0
卢炳圻/test1.ipynb


+ 42 - 0
卢炳圻/test2/test2-1.py

@@ -0,0 +1,42 @@
+import os
+from openai import OpenAI
+from dotenv import load_dotenv
+
+load_dotenv()
+
+API_KEY = os.getenv("BAILIAN_API_KEY")
+BASE_URL = os.getenv("BAILIAN_API_BASE_URL")
+MODEL_NAME = "qwen3-32b"  # 按实际支持的模型名填写
+
+client = OpenAI(base_url=BASE_URL, api_key=API_KEY)
+
+system_prompt = (
+    "你是一个信息收集助手,需要和用户多轮对话,收集用户的姓名、年龄和感兴趣的行业。"
+    "请严格按照顺序提问:先问姓名,再问年龄,再问行业。"
+    "用户如果输入无关内容,请提醒用户先完成信息收集,不要回答无关问题。"
+    "用户如果拒绝回答(如‘不告诉你’),请礼貌提醒必须填写。"
+    "收集完毕后请总结结果。"
+)
+
+if __name__ == "__main__":
+    # OpenAI Python SDK 1.x 及兼容API,messages为list[dict]即可
+    history = [
+        {"role": "system", "content": system_prompt}
+    ]
+    while True:
+        # 若仍有类型报错,建议升级openai库到1.0以上
+        response = client.chat.completions.create(
+            model=MODEL_NAME,
+            messages=history,# type: ignore
+            extra_body={"enable_thinking": False},
+        )
+        content = response.choices[0].message.content
+        ai_reply = content.strip() if content else "[无回复内容]"
+        print(f"AI: {ai_reply}")
+        # 判断是否收集完毕
+        if ("信息收集完成" in ai_reply) or ("收集完毕" in ai_reply) or ("结果" in ai_reply and "姓名" in ai_reply and "年龄" in ai_reply and "行业" in ai_reply):
+            break
+        history.append({"role": "assistant", "content": ai_reply})
+        user_input = input("User: ").strip()
+        history.append({"role": "user", "content": user_input})
+

+ 328 - 0
林兆新/2/chat_with_agent_intructions.py

@@ -0,0 +1,328 @@
+#!/usr/bin/env python3
+
+import os
+
+import sys
+from typing import Iterator
+import dotenv
+from agno.agent import Agent
+from agno.memory.v2.db.sqlite import SqliteMemoryDb
+from agno.memory.v2.memory import Memory
+from agno.models.openai import OpenAILike
+from agno.storage.sqlite import SqliteStorage
+from agno.tools.yfinance import YFinanceTools
+
+# 加载环境变量
+dotenv.load_dotenv()
+
+# 全局联系人数组
+CONTACTS = [
+    {"name": "张三", "phone": "138-0000-1001"},
+    {"name": "李四", "phone": "139-0000-1002"},
+    {"name": "王五", "phone": "136-0000-1003"},
+    {"name": "赵六", "phone": "137-0000-1004"},
+    {"name": "孙七", "phone": "135-0000-1005"},
+    {"name": "周八", "phone": "133-0000-1006"},
+    {"name": "吴九", "phone": "188-0000-1007"},
+    {"name": "郑十", "phone": "180-0000-1008"}
+]
+# 转账工具
+def transfer_money(name: str, phone: str, amount: float):
+    """转账工具"""
+    return f"转账成功,转账金额为:{amount},转账给:{name},转账电话:{phone}"
+
+def find_contact_by_name(name: str):
+    """根据姓名查找联系人信息"""
+    for contact in CONTACTS:
+        if name in contact["name"] or contact["name"] in name:
+            return f"找到联系人:{contact['name']},电话:{contact['phone']}"
+    return f"未找到姓名包含'{name}'的联系人"
+
+def get_account_balance():
+    """随机生成账户余额"""
+    import random
+    # 生成1000到50000之间的随机余额
+    balance = round(random.uniform(1000, 50000), 2)
+    return f"您的当前账户余额为:¥{balance:,.2f}"
+
+def create_memory_agent():
+    """创建具有Memory功能的Agent"""
+    
+    # 检查环境变量
+    api_key = os.getenv("BAILIAN_API_KEY")
+    base_url = os.getenv("BAILIAN_API_BASE_URL")
+    
+    if not api_key or not base_url:
+        print("❌ 错误: 请确保设置了以下环境变量:")
+        print("  - BAILIAN_API_KEY")
+        print("  - BAILIAN_API_BASE_URL")
+        print("\n💡 您可以创建 .env 文件来设置这些变量")
+        return None
+    
+    try:
+        print("🚀 正在创建具有Memory功能的Agent...")
+        
+        # 创建模型
+        model = OpenAILike(
+            id="qwen3-32b",
+            api_key=api_key,
+            base_url=base_url,
+            request_params={"extra_body": {"enable_thinking": False}},
+        )
+        
+        # UserId for the memories
+        user_id = "user_001"
+        # Database file for memory and storage
+        db_file = "tmp/agent_memory.db"
+        
+        # 创建tmp目录(如果不存在)
+        os.makedirs("tmp", exist_ok=True)
+        
+        # Initialize memory.v2
+        memory = Memory(
+            model=model,  # 使用相同的模型进行记忆管理
+            db=SqliteMemoryDb(table_name="user_memories", db_file=db_file),
+        )
+        
+        # Initialize storage
+        storage = SqliteStorage(table_name="agent_sessions", db_file=db_file)
+        
+        # 定义工具函数
+        def get_current_time():
+            """获取当前时间的工具函数"""
+            import datetime
+            current_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+            return f"当前时间是: {current_time}"
+        
+        def remember_info(info: str):
+            """主动记住信息的工具函数"""
+            return f"我已经记住了这个信息: {info}"
+        
+        # Create Agent with Memory and Storage
+        agent = Agent(
+            model=model,
+            # Store memories in a database
+            memory=memory,
+            # Give the Agent the ability to update memories
+            enable_agentic_memory=True,
+            # Run the MemoryManager after each response
+            enable_user_memories=True,
+            # Store the chat history in the database
+            storage=storage,
+            # Add the chat history to the messages
+            add_history_to_messages=True,
+            # Number of history runs to include
+            num_history_runs=3,
+            # Tools
+            tools=[
+                # YFinanceTools(stock_price=True, company_info=True),
+                find_contact_by_name,
+                transfer_money,
+                # get_current_time,
+                # remember_info,
+                get_account_balance
+            ],
+            markdown=False,  # 终端模式关闭markdown
+            show_tool_calls=True,
+            instructions="""
+你是一个具有记忆功能的智能AI助手,检测到用户有转账意图的时候会收集转账所需的信息(转账需要收款人姓名、收款人电话、转账金额)如果获得了收款人的姓名,可以根据收款人的姓名调用{find_contact_by_name}去获取对应的收款人电话再调用{get_account_balance}获取账户余额,如果用户需要转账的余额大于账户余额,则提示用户账户余额不足,如果没有对应的收款人电话再询问用户,基于通义千问模型。你的特点:
+💬 **对话原则**:
+- 回答简洁明了,避免冗长
+- 你只使用markdown格式化输出
+- 当你收集完成转账信息后,会用JSON格式输出,并询问用户是否确认转账
+- 如果用户确认转账,则调用转账工具{transfer_money}函数进行转账,并输出转账结果
+- 如果用户不确认转账,则不进行转账
+
+让我们开始愉快的对话吧!我会记住我们的每次交流。
+            """,
+        )
+        
+        print("✅ Memory Agent 创建成功!")
+        print(f"📱 模型: qwen3-32b")
+        print(f"🧠 记忆: SQLite数据库 ({db_file})")
+        print(f"💾 存储: 会话历史记录")
+        print(f"👤 用户ID: {user_id}")
+        
+        return agent, user_id, memory
+        
+    except Exception as e:
+        print(f"❌ 创建Agent失败: {e}")
+        print("💡 可能的原因:")
+        print("  - API连接问题")
+        print("  - 模型配置错误")
+        print("  - 依赖包未正确安装")
+        return None
+
+def print_chat_banner():
+    """打印对话横幅"""
+    banner = """
+🧠 具有Memory功能的智能AI助手
+===============================
+
+💡 使用说明:
+- 直接输入问题开始对话
+- 输入 'quit' 或 'exit' 退出
+- 输入 'help' 查看更多帮助
+- 输入 'clear' 清屏
+- 输入 'memory' 查看记忆状态
+
+🔥 功能特色:
+- 🧠 持久化记忆 - 跨会话记住您的信息
+- 🤖 智能对话 - 通义千问强大推理
+- 📊 实时数据 - 股票信息查询
+- 🎯 个性化 - 基于记忆提供定制建议
+- 💾 历史保存 - 自动保存对话历史
+"""
+    print(banner)
+
+def handle_special_commands(user_input: str) -> str:
+    """处理特殊命令"""
+    cmd = user_input.lower().strip()
+    
+    if cmd in ['quit', 'exit', 'bye', 'q']:
+        return 'quit'
+    elif cmd == 'help':
+        help_text = """
+🆘 命令帮助
+===========
+
+📝 基本命令:
+- quit/exit/bye/q : 退出对话
+- help           : 显示此帮助
+- clear          : 清屏  
+- memory         : 查看记忆状态
+"""
+        print(help_text)
+        return 'help'
+    elif cmd == 'clear':
+        os.system('clear' if os.name == 'posix' else 'cls')
+        print_chat_banner()
+        return 'clear'
+    elif cmd == 'memory':
+        return 'memory'
+    
+    return 'continue'
+
+def print_memory_status(memory, user_id):
+    """显示记忆状态"""
+    try:
+        print("\n🧠 Memory状态:")
+        print(f"  用户ID: {user_id}")
+        print(f"  数据库: tmp/agent_memory.db")
+        
+        # 尝试获取记忆信息
+        try:
+            memories = memory.get_user_memories(user_id=user_id)
+            print(f"  记忆数量: {len(memories) if memories else 0}")
+            
+            if memories:
+                print("\n📝 最近的记忆:")
+                for i, mem in enumerate(memories[-3:], 1):  # 显示最近3条记忆
+                    content = mem.get('content', '')[:100]  # 限制显示长度
+                    print(f"  {i}. {content}...")
+            else:
+                print("  📭 暂无记忆内容")
+                
+        except Exception as e:
+            print(f"  记忆系统: 已启用 (详情获取失败: {e})")
+        
+        print("\n💭 记忆功能可以帮助我:")
+        print("    - 记住您的姓名、偏好和兴趣")
+        print("    - 保持跨会话的对话连贯性")
+        print("    - 提供基于历史的个性化建议")
+        print("    - 回忆之前讨论过的话题")
+        
+    except Exception as e:
+        print(f"❌ 无法获取记忆状态: {e}")
+
+def chat_with_memory_agent(agent, user_id, memory):
+    """开始与Memory Agent对话"""
+    if not agent:
+        print("❌ Agent未创建成功,无法开始对话")
+        return
+    
+    print_chat_banner()
+    print("🎉 Memory Agent已就绪,开始对话吧!")
+    
+    conversation_count = 0
+    
+    try:
+        while True:
+            # 获取用户输入
+            try:
+                user_input = input(f"🙋 您 ({conversation_count+1}): ").strip()
+            except (EOFError, KeyboardInterrupt):
+                print("\n👋 对话结束,再见!")
+                break
+            
+            # 跳过空输入
+            if not user_input:
+                continue
+            
+            # 处理特殊命令
+            cmd_result = handle_special_commands(user_input)
+            if cmd_result == 'quit':
+                print("👋 感谢使用Memory Agent,我会记住我们的对话!")
+                break
+            elif cmd_result in ['help', 'clear']:
+                continue
+            elif cmd_result == 'memory':
+                print_memory_status(memory, user_id)
+                continue
+            
+            # Agent处理并回复
+            conversation_count += 1
+            print(f"\n🤖 AI助手 ({conversation_count}):")
+            print("=" * 60)
+            
+            try:
+                # 调用Agent处理用户问题,使用user_id来关联记忆
+                response = agent.print_response(
+                    user_input, 
+                    user_id=user_id,  # 关联用户ID
+                    stream=True
+                )
+                print("=" * 60)
+                print()  # 空行分隔
+                
+                # 每3轮对话提示一次记忆功能
+                if conversation_count % 3 == 0:
+                    print("🧠 提示:我已经记住了我们的对话内容,下次聊天时我仍会记得!")
+                
+            except KeyboardInterrupt:
+                print("\n⚠️  回复被中断,继续下一轮对话...")
+                continue
+            except Exception as e:
+                print(f"❌ 处理消息出错: {e}")
+                print("💡 请检查网络连接或API配置")
+                continue
+                
+    except Exception as e:
+        print(f"❌ 对话过程中出现错误: {e}")
+    
+    print("🔚 对话会话结束,记忆已保存")
+
+def main():
+    """主函数"""
+    try:
+        print("🚀 启动具有Memory功能的AI对话系统...")
+        
+        # 创建Memory Agent
+        result = create_memory_agent()
+        if not result:
+            sys.exit(1)
+            
+        agent, user_id, memory = result
+        
+        print("\n" + "="*50)
+        print("🚀 启动对话模式...")
+        
+        # 开始对话
+        chat_with_memory_agent(agent, user_id, memory)
+        
+    except Exception as e:
+        print(f"❌ 启动失败: {e}")
+        sys.exit(1)
+
+if __name__ == "__main__":
+    main()

+ 1007 - 0
林兆新/2/sse_app.py

@@ -0,0 +1,1007 @@
+#!/usr/bin/env python3
+"""
+SSE + FastAPI 实时对话窗口
+用户输入消息,后端随机生成回复
+"""
+
+import asyncio
+import json
+import time
+import random
+import os
+from datetime import datetime
+from typing import List, Dict, Any
+from concurrent.futures import ThreadPoolExecutor
+from fastapi import FastAPI, Request
+from fastapi.responses import StreamingResponse, HTMLResponse
+import uvicorn
+import dotenv
+
+# Agent相关导入
+try:
+    from agno.agent import Agent
+    from agno.memory.v2.db.sqlite import SqliteMemoryDb
+    from agno.memory.v2.memory import Memory
+    from agno.models.openai import OpenAILike
+    from agno.storage.sqlite import SqliteStorage
+    AGENT_AVAILABLE = True
+    print("✅ Agent依赖已加载")
+except ImportError as e:
+    print(f"⚠️ Agent依赖未安装: {e}")
+    AGENT_AVAILABLE = False
+
+# 加载环境变量
+dotenv.load_dotenv()
+
+# 移除消息队列,现在使用直接SSE流
+
+# 全局Agent实例和Memory
+global_agent = None
+global_memory = None
+global_user_id = "user_web_chat"
+# 线程池执行器
+thread_executor = ThreadPoolExecutor(max_workers=2)
+
+# Agent工具函数
+def get_current_time():
+    """获取当前时间"""
+    return f"当前时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
+
+def get_user_info():
+    """获取用户信息"""
+    return "用户信息: 当前用户正在使用web实时对话窗口"
+
+async def call_agent_async(agent, message, user_id):
+    """异步调用Agent,避免阻塞事件循环"""
+    loop = asyncio.get_event_loop()
+    try:
+        print(f"🔄 开始异步调用Agent... (消息: {message[:50]})")
+        
+        # 在线程池中执行同步的Agent调用,设置超时
+        response = await asyncio.wait_for(
+            loop.run_in_executor(
+                thread_executor, 
+                lambda: agent.run(message, user_id=user_id)
+            ),
+            timeout=30.0  # 30秒超时
+        )
+        
+        print(f"✅ Agent异步调用完成")
+        return response
+    except asyncio.TimeoutError:
+        print(f"⏰ Agent调用超时 (30秒)")
+        return None
+    except Exception as e:
+        print(f"❌ Agent异步调用失败: {e}")
+        return None
+
+def create_agent():
+    """创建具有Memory功能的Agent实例"""
+    global global_agent, global_memory
+    
+    if not AGENT_AVAILABLE:
+        print("❌ Agent依赖不可用,将使用随机回复")
+        return None
+    
+    # 检查环境变量
+    api_key = os.getenv("BAILIAN_API_KEY")
+    base_url = os.getenv("BAILIAN_API_BASE_URL")
+    
+    if not api_key or not base_url:
+        print("⚠️ 环境变量未设置,Agent功能将不可用,使用随机回复")
+        return None
+    
+    try:
+        print("🚀 创建具有Memory功能的Agent实例...")
+        
+        # 创建模型
+        model = OpenAILike(
+            id="qwen3-32b",
+            api_key=api_key,
+            base_url=base_url,
+            request_params={"extra_body": {"enable_thinking": False}},
+        )
+        
+        # 数据库文件
+        db_file = "tmp/agent_memory.db"
+        os.makedirs("tmp", exist_ok=True)
+        
+        # 初始化Memory v2
+        memory = Memory(
+            model=model,  # 使用相同的模型进行记忆管理
+            db=SqliteMemoryDb(table_name="user_memories", db_file=db_file),
+        )
+        
+        # 初始化存储
+        storage = SqliteStorage(table_name="agent_sessions", db_file=db_file)
+        
+        # 定义记忆工具函数
+        def remember_info(info: str):
+            """主动记住信息的工具函数"""
+            return f"我已经记住了这个信息: {info}"
+        
+        # 创建Agent with Memory功能
+        agent = Agent(
+            model=model,
+            # Store memories in a database
+            memory=memory,
+            # Give the Agent the ability to update memories
+            enable_agentic_memory=True,
+            # Run the MemoryManager after each response
+            enable_user_memories=True,
+            # Store the chat history in the database
+            storage=storage,
+            # Add the chat history to the messages
+            add_history_to_messages=True,
+            # Number of history runs to include
+            num_history_runs=3,
+            # Tools
+            tools=[get_current_time, get_user_info, remember_info],
+            markdown=False,  # 简单文本回复
+            show_tool_calls=False,  # 关闭工具调用显示,避免影响web显示
+            instructions="""
+你是一个具有记忆功能的友好AI助手,正在通过web实时对话窗口与用户交流。
+
+🧠 **记忆功能**:
+- 你可以记住用户的姓名、偏好和兴趣
+- 保持跨会话的对话连贯性
+- 基于历史对话提供个性化建议
+- 记住之前讨论过的话题
+
+💬 **对话原则**:
+- 使用简洁、自然的中文回答
+- 语气友好、热情
+- 回答要有帮助性
+- 可以调用工具获取信息
+- 主动记住重要信息
+- 基于记忆提供个性化回应
+
+🎯 **个性化服务**:
+- 如果用户告诉你他们的姓名,主动记住
+- 记住用户的偏好和兴趣
+- 在后续对话中引用之前的内容
+- 提供基于历史的个性化建议
+
+请与用户进行愉快的对话!我会记住我们的每次交流。
+            """,
+        )
+        
+        global_agent = agent
+        global_memory = memory
+        print("✅ Memory Agent创建成功!")
+        print(f"📱 模型: qwen3-32b")
+        print(f"🧠 记忆: SQLite数据库 ({db_file})")
+        print(f"💾 存储: 会话历史记录")
+        print(f"👤 用户ID: {global_user_id}")
+        
+        # 简单测试Agent是否正常工作
+        try:
+            test_response = agent.run("你好", user_id=global_user_id)
+            print(f"🧪 Agent测试成功: {str(test_response)[:50]}...")
+        except Exception as e:
+            print(f"⚠️ Agent测试失败: {e}")
+        
+        return agent
+        
+    except Exception as e:
+        print(f"❌ Agent创建失败: {e}")
+        return None
+
+# 随机回复内容(Agent不可用时的备用)
+RANDOM_REPLIES = [
+    "这是一个有趣的观点!",
+    "我完全同意你的看法。",
+    "让我想想这个问题...",
+    "你说得很有道理。",
+    "这让我想到了另一个话题。",
+    "非常好的问题!",
+    "我觉得你可以试试这样做。",
+    "这确实是个挑战。",
+    "你的想法很有创意!",
+    "我需要更多信息来帮助你。",
+    "这个话题很深入呢。",
+    "你考虑得很周全。"
+]
+
+# 创建FastAPI应用
+app = FastAPI(title="SSE实时对话", description="简单的实时聊天系统", version="1.0.0")
+
+# 应用启动时初始化Agent
+@app.on_event("startup")
+async def startup_event():
+    print("🚀 启动SSE实时对话系统...")
+    print("📍 访问地址: http://localhost:8000")
+    
+    # 初始化Agent
+    try:
+        create_agent()
+        
+        if global_agent:
+            print("✅ Memory Agent已就绪,将提供具有记忆功能的智能回复")
+            print("🧠 记忆功能: 可记住用户信息和对话历史")
+            print("💬 特殊命令: 在对话中输入 '记忆' 查看记忆状态")
+        else:
+            print("⚠️ Agent不可用,将使用随机回复")
+            
+    except Exception as e:
+        print(f"❌ Agent创建过程中出错: {e}")
+        print("⚠️ 系统将使用随机回复模式")
+
+@app.get("/")
+async def home():
+    """主页 - 对话界面"""
+    html_content = """
+    <!DOCTYPE html>
+    <html lang="zh-CN">
+    <head>
+        <meta charset="UTF-8">
+        <meta name="viewport" content="width=device-width, initial-scale=1.0">
+        <title>实时对话窗口</title>
+        <style>
+            * {
+                margin: 0;
+                padding: 0;
+                box-sizing: border-box;
+            }
+            
+            body {
+                font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
+                background: #f0f2f5;
+                height: 100vh;
+                display: flex;
+                justify-content: center;
+                align-items: center;
+            }
+            
+            .chat-container {
+                width: 600px;
+                height: 500px;
+                background: white;
+                border-radius: 10px;
+                box-shadow: 0 4px 20px rgba(0,0,0,0.1);
+                display: flex;
+                flex-direction: column;
+                overflow: hidden;
+            }
+            
+            .chat-header {
+                background: #4a90e2;
+                color: white;
+                padding: 15px 20px;
+                text-align: center;
+                font-size: 18px;
+                font-weight: 600;
+            }
+            
+            .status-bar {
+                padding: 8px 15px;
+                background: #e8f4f8;
+                font-size: 12px;
+                color: #5a5a5a;
+                border-bottom: 1px solid #e0e0e0;
+            }
+            
+            .messages-container {
+                flex: 1;
+                padding: 15px;
+                overflow-y: auto;
+                background: #fafafa;
+            }
+            
+            .message {
+                margin-bottom: 12px;
+                display: flex;
+                animation: slideIn 0.3s ease-out;
+            }
+            
+            @keyframes slideIn {
+                from {
+                    opacity: 0;
+                    transform: translateY(10px);
+                }
+                to {
+                    opacity: 1;
+                    transform: translateY(0);
+                }
+            }
+            
+            .message.user {
+                justify-content: flex-end;
+            }
+            
+            .message-bubble {
+                max-width: 75%;
+                padding: 10px 15px;
+                border-radius: 18px;
+                word-wrap: break-word;
+                position: relative;
+            }
+            
+            .message.user .message-bubble {
+                background: #4a90e2;
+                color: white;
+            }
+            
+            .message.bot .message-bubble {
+                background: white;
+                color: #333;
+                border: 1px solid #e0e0e0;
+                box-shadow: 0 1px 3px rgba(0,0,0,0.1);
+            }
+            
+            .message.system .message-bubble {
+                background: #fff3cd;
+                color: #856404;
+                border: 1px solid #ffeaa7;
+                font-style: italic;
+                text-align: center;
+                max-width: 100%;
+            }
+            
+            .message-time {
+                font-size: 10px;
+                color: #999;
+                margin-top: 3px;
+                text-align: right;
+            }
+            
+            .message.user .message-time {
+                text-align: right;
+            }
+            
+            .message.bot .message-time {
+                text-align: left;
+            }
+            
+            .input-container {
+                padding: 15px;
+                background: white;
+                border-top: 1px solid #e0e0e0;
+                display: flex;
+                gap: 10px;
+            }
+            
+            .message-input {
+                flex: 1;
+                padding: 10px 15px;
+                border: 1px solid #ddd;
+                border-radius: 20px;
+                outline: none;
+                font-size: 14px;
+                transition: border-color 0.3s;
+            }
+            
+            .message-input:focus {
+                border-color: #4a90e2;
+                box-shadow: 0 0 0 2px rgba(74, 144, 226, 0.2);
+            }
+            
+            .send-button {
+                background: #4a90e2;
+                color: white;
+                border: none;
+                border-radius: 20px;
+                padding: 10px 20px;
+                cursor: pointer;
+                font-size: 14px;
+                font-weight: 600;
+                transition: all 0.3s;
+            }
+            
+            .send-button:hover {
+                background: #357abd;
+                transform: translateY(-1px);
+            }
+            
+            .send-button:disabled {
+                background: #ccc;
+                cursor: not-allowed;
+                transform: none;
+            }
+            
+            .typing-indicator {
+                display: none;
+                padding: 10px 15px;
+                color: #666;
+                font-style: italic;
+                font-size: 12px;
+            }
+            
+            .typing-dots {
+                display: inline-block;
+            }
+            
+            .typing-dots::after {
+                content: '';
+                animation: typing 1.5s infinite;
+            }
+            
+            @keyframes typing {
+                0%, 60%, 100% { content: ''; }
+                30% { content: '.'; }
+                40% { content: '..'; }
+                50% { content: '...'; }
+            }
+            
+            /* 打字机光标效果 */
+            .typing-cursor {
+                display: inline-block;
+                background-color: #333;
+                width: 2px;
+                height: 1em;
+                margin-left: 1px;
+                animation: blink 1s infinite;
+            }
+            
+            .message.bot .typing-cursor {
+                background-color: #333;
+            }
+            
+            @keyframes blink {
+                0%, 50% { opacity: 1; }
+                51%, 100% { opacity: 0; }
+            }
+        </style>
+    </head>
+    <body>
+        <div class="chat-container">
+            <div class="chat-header">
+                💬 实时对话窗口
+            </div>
+            
+            <div class="status-bar" id="statusBar">
+                正在连接...
+            </div>
+            
+            <div class="messages-container" id="messagesContainer">
+                <!-- 消息显示区域 -->
+            </div>
+            
+            <div class="typing-indicator" id="typingIndicator">
+                机器人正在输入<span class="typing-dots"></span>
+            </div>
+            
+                         <div class="input-container">
+                 <input 
+                     type="text" 
+                     id="messageInput" 
+                     class="message-input" 
+                     placeholder="输入您的消息... (现在支持打字机效果!输入 '记忆' 查看记忆状态)"
+                     maxlength="500"
+                 >
+                 <button id="sendButton" class="send-button">发送</button>
+             </div>
+        </div>
+        
+        <script>
+            // 全局变量
+            let currentBotMessageElement = null;  // 当前正在构建的机器人消息元素
+            
+            const messagesContainer = document.getElementById('messagesContainer');
+            const messageInput = document.getElementById('messageInput');
+            const sendButton = document.getElementById('sendButton');
+            const statusBar = document.getElementById('statusBar');
+            const typingIndicator = document.getElementById('typingIndicator');
+            
+            // 页面加载完成后初始化
+            window.addEventListener('load', function() {
+                console.log('页面加载完成');
+                
+                // 启用输入功能
+                messageInput.disabled = false;
+                sendButton.disabled = false;
+                messageInput.focus();
+                
+                // 显示欢迎消息
+                addMessage('🎉 欢迎使用AI实时对话窗口!我是您的智能助手,具有记忆功能和打字机效果。我会一个词一个词地回复您,就像真人打字一样!请开始聊天吧~', 'system');
+                
+                // 更新状态
+                statusBar.textContent = '✅ 已就绪 - 请开始对话';
+                statusBar.style.background = '#d4edda';
+                statusBar.style.color = '#155724';
+            });
+            
+            // 创建单次SSE连接用于获取回复
+            function createSSEConnection(message) {
+                return new Promise((resolve, reject) => {
+                    const encodedMessage = encodeURIComponent(message);
+                    const eventSource = new EventSource(`/api/chat?message=${encodedMessage}`);
+                    
+                    eventSource.onopen = function() {
+                        console.log('SSE连接已建立');
+                        statusBar.textContent = '🔄 正在处理消息...';
+                        statusBar.style.background = '#fff3cd';
+                        statusBar.style.color = '#856404';
+                    };
+                    
+                    eventSource.onmessage = function(event) {
+                        try {
+                            const data = JSON.parse(event.data);
+                            handleSSEMessage(data);
+                            
+                            // 当收到complete消息时关闭连接
+                            if (data.type === 'complete') {
+                                eventSource.close();
+                                resolve();
+                            }
+                        } catch (e) {
+                            console.error('消息解析错误:', e);
+                            eventSource.close();
+                            reject(e);
+                        }
+                    };
+                    
+                    eventSource.onerror = function() {
+                        console.log('SSE连接失败');
+                        eventSource.close();
+                        statusBar.textContent = '❌ 连接失败';
+                        statusBar.style.background = '#f8d7da';
+                        statusBar.style.color = '#721c24';
+                        reject(new Error('SSE连接失败'));
+                    };
+                });
+            }
+            
+            // 处理SSE消息
+            function handleSSEMessage(data) {
+                console.log('收到SSE消息:', data);
+                
+                switch(data.type) {
+                    case 'status':
+                        // 更新状态栏显示处理进度
+                        statusBar.textContent = data.message;
+                        statusBar.style.background = '#fff3cd';
+                        statusBar.style.color = '#856404';
+                        break;
+                    case 'bot_message_start':
+                        // 开始新的机器人回复(打字机效果)
+                        hideTypingIndicator();
+                        currentBotMessageElement = createEmptyBotMessage();
+                        break;
+                    case 'bot_message_token':
+                        // 添加单个单词到当前机器人回复
+                        if (currentBotMessageElement) {
+                            appendTokenToBotMessage(currentBotMessageElement, data.token);
+                        }
+                        break;
+                    case 'bot_message_end':
+                        // 完成机器人回复
+                        if (currentBotMessageElement) {
+                            removeTypingCursor(currentBotMessageElement);
+                        }
+                        currentBotMessageElement = null;
+                        console.log('✅ 打字机效果回复完成:', data.complete_message);
+                        break;
+                    case 'bot_message':
+                        // 兼容旧版本的完整消息模式
+                        hideTypingIndicator();
+                        addMessage(data.message, 'bot');
+                        break;
+                    case 'complete':
+                        // 回复完成,更新状态
+                        statusBar.textContent = '✅ 已就绪 - 请开始对话';
+                        statusBar.style.background = '#d4edda';
+                        statusBar.style.color = '#155724';
+                        break;
+                    case 'error':
+                        hideTypingIndicator();
+                        addMessage('❌ 错误: ' + data.message, 'system');
+                        statusBar.textContent = '❌ 处理失败';
+                        statusBar.style.background = '#f8d7da';
+                        statusBar.style.color = '#721c24';
+                        break;
+                }
+            }
+            
+            // 添加消息到界面
+            function addMessage(content, type) {
+                console.log(`添加消息: [${type}] ${content}`);
+                
+                const messageDiv = document.createElement('div');
+                messageDiv.className = `message ${type}`;
+                
+                const bubbleDiv = document.createElement('div');
+                bubbleDiv.className = 'message-bubble';
+                bubbleDiv.textContent = content;
+                
+                const timeDiv = document.createElement('div');
+                timeDiv.className = 'message-time';
+                timeDiv.textContent = new Date().toLocaleTimeString();
+                
+                messageDiv.appendChild(bubbleDiv);
+                messageDiv.appendChild(timeDiv);
+                
+                messagesContainer.appendChild(messageDiv);
+                messagesContainer.scrollTop = messagesContainer.scrollHeight;
+            }
+            
+            // 创建空的机器人消息元素(用于打字机效果)
+            function createEmptyBotMessage() {
+                console.log('创建空的机器人消息元素');
+                
+                const messageDiv = document.createElement('div');
+                messageDiv.className = 'message bot';
+                
+                const bubbleDiv = document.createElement('div');
+                bubbleDiv.className = 'message-bubble';
+                
+                // 添加打字光标
+                const cursor = document.createElement('span');
+                cursor.className = 'typing-cursor';
+                bubbleDiv.appendChild(cursor);
+                
+                const timeDiv = document.createElement('div');
+                timeDiv.className = 'message-time';
+                timeDiv.textContent = new Date().toLocaleTimeString();
+                
+                messageDiv.appendChild(bubbleDiv);
+                messageDiv.appendChild(timeDiv);
+                
+                messagesContainer.appendChild(messageDiv);
+                messagesContainer.scrollTop = messagesContainer.scrollHeight;
+                
+                return messageDiv;
+            }
+            
+            // 向机器人消息添加单词(打字机效果)
+            function appendTokenToBotMessage(messageElement, token) {
+                if (!messageElement) return;
+                
+                const bubbleDiv = messageElement.querySelector('.message-bubble');
+                const cursor = bubbleDiv.querySelector('.typing-cursor');
+                
+                if (bubbleDiv && cursor) {
+                    // 在光标前插入单词
+                    const textNode = document.createTextNode(token);
+                    bubbleDiv.insertBefore(textNode, cursor);
+                    
+                    // 滚动到底部,保持跟踪打字进度
+                    messagesContainer.scrollTop = messagesContainer.scrollHeight;
+                }
+            }
+            
+            // 移除打字光标
+            function removeTypingCursor(messageElement) {
+                if (!messageElement) return;
+                
+                const cursor = messageElement.querySelector('.typing-cursor');
+                if (cursor) {
+                    cursor.remove();
+                }
+            }
+            
+            // 显示输入指示器
+            function showTypingIndicator() {
+                typingIndicator.style.display = 'block';
+                messagesContainer.scrollTop = messagesContainer.scrollHeight;
+            }
+            
+                         // 隐藏输入指示器
+             function hideTypingIndicator() {
+                 typingIndicator.style.display = 'none';
+             }
+             
+             // 显示记忆状态
+             async function showMemoryStatus() {
+                 try {
+                     showTypingIndicator();
+                     
+                     const response = await fetch('/api/memory');
+                     const result = await response.json();
+                     
+                     hideTypingIndicator();
+                     
+                     if (result.available) {
+                         let memoryInfo = `🧠 **记忆状态信息**\n\n`;
+                         memoryInfo += `👤 用户ID: ${result.user_id}\n`;
+                         memoryInfo += `📊 记忆数量: ${result.memory_count}\n`;
+                         memoryInfo += `💾 数据库: ${result.database}\n\n`;
+                         
+                         if (result.recent_memories && result.recent_memories.length > 0) {
+                             memoryInfo += `📝 **最近的记忆:**\n`;
+                             result.recent_memories.forEach((mem, index) => {
+                                 memoryInfo += `${index + 1}. ${mem.content}...\n`;
+                             });
+                         } else {
+                             memoryInfo += `📭 暂无记忆内容\n`;
+                         }
+                         
+                         memoryInfo += `\n💭 **记忆功能说明:**\n`;
+                         memoryInfo += `- 我可以记住您的姓名、偏好和兴趣\n`;
+                         memoryInfo += `- 保持跨会话的对话连贯性\n`;
+                         memoryInfo += `- 基于历史对话提供个性化建议\n`;
+                         memoryInfo += `- 记住之前讨论过的话题`;
+                         
+                         addMessage(memoryInfo, 'bot');
+                     } else {
+                         addMessage('❌ 记忆功能不可用: ' + result.message, 'system');
+                     }
+                     
+                 } catch (error) {
+                     hideTypingIndicator();
+                     addMessage('❌ 获取记忆状态失败: ' + error.message, 'system');
+                 }
+             }
+            
+            // 发送消息
+            async function sendMessage() {
+                const message = messageInput.value.trim();
+                console.log('准备发送消息:', message);
+                
+                if (!message) {
+                    console.log('消息为空');
+                    return;
+                }
+                
+                // 检查特殊命令
+                if (message === '记忆' || message.toLowerCase() === 'memory') {
+                    console.log('处理记忆查询命令');
+                    addMessage(message, 'user');
+                    messageInput.value = '';
+                    await showMemoryStatus();
+                    return;
+                }
+                
+                // 立即显示用户消息
+                addMessage(message, 'user');
+                messageInput.value = '';
+                
+                // 禁用输入
+                messageInput.disabled = true;
+                sendButton.disabled = true;
+                
+                // 显示输入指示器
+                showTypingIndicator();
+                
+                try {
+                    // 使用SSE获取回复
+                    console.log('发送SSE请求...');
+                    await createSSEConnection(message);
+                    console.log('SSE对话完成');
+                    
+                } catch (error) {
+                    console.error('SSE对话错误:', error);
+                    hideTypingIndicator();
+                    addMessage('❌ 对话失败: ' + error.message, 'system');
+                    statusBar.textContent = '❌ 对话失败';
+                    statusBar.style.background = '#f8d7da';
+                    statusBar.style.color = '#721c24';
+                } finally {
+                    // 重新启用输入
+                    messageInput.disabled = false;
+                    sendButton.disabled = false;
+                    messageInput.focus();
+                }
+            }
+            
+            // 事件监听器
+            sendButton.addEventListener('click', sendMessage);
+            
+            messageInput.addEventListener('keypress', function(e) {
+                if (e.key === 'Enter' && !e.shiftKey) {
+                    e.preventDefault();
+                    sendMessage();
+                }
+            });
+            
+            // 页面关闭时的清理工作
+            window.addEventListener('beforeunload', function() {
+                console.log('页面即将关闭');
+            });
+        </script>
+    </body>
+    </html>
+    """
+    return HTMLResponse(content=html_content)
+
+# 旧的SSE接口已被移除,现在使用 /api/chat 直接SSE接口
+
+async def generate_chat_response(user_message: str):
+    """Stream one chat reply as a Server-Sent-Events (SSE) sequence.
+
+    Yields ``data: <json>\n\n`` frames in this order:
+      1. ``status`` frames while the reply is being prepared;
+      2. ``bot_message_start``, then one ``bot_message_token`` per
+         whitespace-delimited token (typewriter effect), then
+         ``bot_message_end`` carrying the complete text;
+      3. a final ``complete`` frame.
+    If anything raises, a single ``error`` frame is yielded instead.
+
+    Args:
+        user_message: Raw text entered by the user.
+    """
+    global global_agent, global_user_id
+    
+    try:
+        print(f"📨 收到用户消息: {user_message}")
+        print(f"🤖 Agent可用性: {global_agent is not None}")
+        
+        # Tell the client we started processing.
+        yield f"data: {json.dumps({'type': 'status', 'message': '正在处理您的消息...'}, ensure_ascii=False)}\n\n"
+        
+        # Simulated "thinking" delay.
+        await asyncio.sleep(random.uniform(0.5, 1.5))
+        
+        bot_reply = None
+        
+        # Try the LLM Agent first, if one was configured at startup.
+        if global_agent:
+            try:
+                print("🤖 调用Memory Agent生成回复...")
+                yield f"data: {json.dumps({'type': 'status', 'message': '🤖 正在生成智能回复...'}, ensure_ascii=False)}\n\n"
+                
+                # Call the Agent asynchronously; user_id ties this turn to
+                # the user's persisted memory.
+                response = await call_agent_async(global_agent, user_message, global_user_id)
+                
+                if response:
+                    bot_reply = response.content if hasattr(response, 'content') else str(response)
+                    print(f"✅ Agent回复: {bot_reply}")
+                    print(f"🧠 记忆已更新 (用户ID: {global_user_id})")
+                else:
+                    print("❌ Agent返回空响应")
+                    bot_reply = None
+                
+            except Exception as e:
+                # Best-effort: any Agent failure falls through to the
+                # canned-reply path below.
+                print(f"❌ Agent调用失败: {e}")
+                bot_reply = None
+        
+        # Fallback: Agent unavailable or failed — pick a canned reply.
+        if not bot_reply:
+            bot_reply = random.choice(RANDOM_REPLIES)
+            print(f"🎲 使用随机回复: {bot_reply}")
+        
+        # Last-resort default (only reachable if RANDOM_REPLIES can
+        # produce a falsy value).
+        if not bot_reply:
+            bot_reply = "抱歉,我暂时无法回复。请稍后再试。"
+            print(f"⚠️ 使用默认回复: {bot_reply}")
+        
+        # Stream the reply with a typewriter effect.
+        print(f"⌨️ 开始打字机效果发送回复...")
+        
+        # Signal the client to create an empty bot-message bubble.
+        yield f"data: {json.dumps({'type': 'bot_message_start', 'timestamp': datetime.now().isoformat()}, ensure_ascii=False)}\n\n"
+        
+        # Emit the reply token by token.
+        import re
+        # Split into alternating non-space/space runs so whitespace
+        # (including newlines) reaches the client verbatim.
+        words = re.findall(r'\S+|\s+', bot_reply)
+        
+        for i, word in enumerate(words):
+            # One SSE frame per token.
+            word_message = {
+                "type": "bot_message_token",
+                "token": word,
+                "position": i
+            }
+            yield f"data: {json.dumps(word_message, ensure_ascii=False)}\n\n"
+            
+            # Pace the typing: pause length depends on the token kind.
+            if word.strip() == '':
+                # Whitespace runs (spaces, newlines, ...).
+                await asyncio.sleep(random.uniform(0.05, 0.1))
+            elif any(punct in word for punct in '。!?'):
+                # Tokens containing sentence-final punctuation.
+                await asyncio.sleep(random.uniform(0.5, 0.8))
+            elif any(punct in word for punct in ',;:'):
+                # Tokens containing mid-sentence punctuation.
+                await asyncio.sleep(random.uniform(0.3, 0.5))
+            elif len(word.strip()) > 5:
+                # Long tokens.
+                await asyncio.sleep(random.uniform(0.2, 0.4))
+            else:
+                # Ordinary tokens.
+                await asyncio.sleep(random.uniform(0.1, 0.3))
+        
+        # Close the typewriter stream with the assembled full text.
+        end_message = {
+            "type": "bot_message_end",
+            "complete_message": bot_reply,
+            "timestamp": datetime.now().isoformat()
+        }
+        yield f"data: {json.dumps(end_message, ensure_ascii=False)}\n\n"
+        
+        print(f"✅ 打字机效果发送完成")
+        
+        # Final frame: the client closes its EventSource on 'complete'.
+        yield f"data: {json.dumps({'type': 'complete', 'message': '回复完成'}, ensure_ascii=False)}\n\n"
+        
+    except Exception as e:
+        # Surface any failure to the client as an in-band 'error' frame.
+        print(f"处理消息错误: {e}")
+        error_message = {
+            "type": "error", 
+            "message": f"处理错误: {str(e)}"
+        }
+        yield f"data: {json.dumps(error_message, ensure_ascii=False)}\n\n"
+
+@app.get("/api/chat")
+async def chat_api_sse(message: str = ""):
+    """SSE chat endpoint: ``GET /api/chat?message=...``.
+
+    Streams the frames produced by generate_chat_response() as
+    ``text/event-stream``.
+
+    NOTE(review): a blank message returns a plain JSON dict with
+    HTTP 200 rather than a 4xx status — clients must check for the
+    'error' key themselves.
+    """
+    if not message.strip():
+        return {"error": "消息不能为空"}
+    
+    return StreamingResponse(
+        generate_chat_response(message.strip()),
+        media_type="text/event-stream",
+        headers={
+            "Cache-Control": "no-cache",
+            "Connection": "keep-alive",
+            # Permissive CORS so the page can be served from another origin.
+            "Access-Control-Allow-Origin": "*",
+        }
+    )
+
+@app.get("/api/status")
+async def get_status():
+    """Liveness probe: reports whether the chat Agent was configured."""
+    return {
+        "status": "running",
+        "agent_available": global_agent is not None,
+        "timestamp": datetime.now().isoformat()
+    }
+
+@app.post("/api/clear")
+async def clear_messages():
+    """No-op retained for backward compatibility.
+
+    Earlier versions buffered messages in a server-side queue; the
+    current direct-SSE design keeps no server-side history to clear.
+    """
+    # Intentionally does nothing — see docstring.
+    
+    return {"success": True, "message": "消息历史已清空(使用SSE直接模式)"}
+
+@app.get("/api/memory")
+async def get_memory_status():
+    """Return a snapshot of the Agent's per-user memory.
+
+    Response fields: ``available`` (bool), ``user_id``, and — when
+    memory is enabled — ``memory_count`` plus up to three
+    ``recent_memories`` (content truncated to 100 chars).  Failures
+    while reading details are reported in-band, never raised.
+    """
+    global global_memory, global_user_id, global_agent
+    
+    if not global_agent or not global_memory:
+        return {
+            "available": False,
+            "message": "Memory功能不可用",
+            "user_id": global_user_id
+        }
+    
+    try:
+        # Fetch every stored memory for this user.
+        memories = global_memory.get_user_memories(user_id=global_user_id)
+        memory_count = len(memories) if memories else 0
+        
+        # Summarize the three most recent entries.
+        recent_memories = []
+        if memories:
+            for mem in memories[-3:]:
+                try:
+                    # UserMemory-style objects expose attributes; fall
+                    # back to str() for anything else.
+                    content = getattr(mem, 'content', '')[:100] if hasattr(mem, 'content') else str(mem)[:100]
+                    timestamp = getattr(mem, 'created_at', '') if hasattr(mem, 'created_at') else ''
+                    recent_memories.append({
+                        "content": content,
+                        "timestamp": str(timestamp),
+                    })
+                except Exception as e:
+                    # Fallback: plain string conversion.
+                    recent_memories.append({
+                        "content": str(mem)[:100],
+                        "timestamp": "",
+                    })
+        
+        return {
+            "available": True,
+            "user_id": global_user_id,
+            "memory_count": memory_count,
+            "recent_memories": recent_memories,
+            "database": "tmp/agent_memory.db",
+            "timestamp": datetime.now().isoformat()
+        }
+        
+    except Exception as e:
+        # Memory is enabled but details could not be read; report in-band.
+        return {
+            "available": True,
+            "user_id": global_user_id,
+            "memory_count": "unknown",
+            "error": str(e),
+            "message": "记忆系统已启用但获取详情失败",
+            "timestamp": datetime.now().isoformat()
+        }
+
+if __name__ == "__main__":
+    # Startup banner; Agent features need the BAILIAN_* env vars.
+    print("🚀 启动SSE实时对话系统...")
+    print("🌐 访问地址: http://localhost:8081")
+    print("🤖 支持Agent智能回复 + SSE实时推送")
+    print("📋 如需Agent功能,请配置环境变量:")
+    print("   BAILIAN_API_KEY=your_api_key")
+    print("   BAILIAN_API_BASE_URL=your_base_url")
+    print("=" * 50)
+    
+    # reload=True: development mode, auto-restart on code changes.
+    uvicorn.run(
+        "sse_app:app",
+        host="0.0.0.0",
+        port=8081,
+        reload=True,
+        log_level="info"
+    )

+ 160 - 0
黄靖淏/agno/code/agno_agent_level2_3_test.ipynb

@@ -0,0 +1,160 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "bc1b70ed",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from agno.agent import Agent, RunResponse\n",
+    "from agno.models.openai.like import OpenAILike\n",
+    "from agno.memory.v2.db.sqlite import SqliteMemoryDb\n",
+    "from agno.memory.v2.memory import Memory\n",
+    "from agno.memory.v2.schema import UserMemory\n",
+    "from agno.memory.v2.db.schema import MemoryRow\n",
+    "from agno.memory.v2.db.sqlite import SqliteMemoryDb\n",
+    "from agno.memory.v2.memory import Memory\n",
+    "from agno.memory.v2.schema import UserMemory\n",
+    "from agno.storage.sqlite import SqliteStorage\n",
+    "import uuid\n",
+    "import os\n",
+    "\n",
+    "memory_db = SqliteMemoryDb(db_file=\"tmp/bank_memory.db\", table_name=\"memory\")\n",
+    "storge_db = SqliteStorage(\n",
+    "        table_name=\"agent_sessions\", db_file=\"tmp/persistent_memory.db\"\n",
+    "    )\n",
+    "\n",
+    "memory = Memory(\n",
+    "        \n",
+    "        model = OpenAILike(\n",
+    "            id=\"qwen3-4b\",\n",
+    "            api_key=os.getenv(\"BAILIAN_API_KEY\"),\n",
+    "            base_url=\"https://dashscope.aliyuncs.com/compatible-mode/v1\",\n",
+    "            request_params={\"extra_body\": {\"enable_thinking\": False}},\n",
+    "        ),\n",
+    "        db=memory_db\n",
+    "    )\n",
+    "\n",
+    "user_id = str(uuid.uuid4())\n",
+    "\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "93039b6a",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "memory.add_user_memory(\n",
+    "    memory=UserMemory(\n",
+    "        memory=\"我叫hhh,我喜欢冰淇淋和看电影。\",\n",
+    "        topics=[\"姓名\", \"兴趣\"]\n",
+    "    ),\n",
+    "    user_id=user_id,\n",
+    "    \n",
+    ")\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "b0f6fa9c",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "memory.clear()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "99e6b8be",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "agent = Agent(\n",
+    "    model=OpenAILike(\n",
+    "        id=\"qwen3-4b\",\n",
+    "        api_key=os.getenv(\"BAILIAN_API_KEY\"),\n",
+    "        base_url=\"https://dashscope.aliyuncs.com/compatible-mode/v1\",\n",
+    "        request_params={\"extra_body\": {\"enable_thinking\": False}},\n",
+    "    ),\n",
+    "    add_history_to_messages=True,\n",
+    "    #存用户记忆,可指定模型按topic检索?\n",
+    "    memory=memory,\n",
+    "    #存对话历史,工具调用历史\n",
+    "    storage=storge_db,\n",
+    "    #将用户对话加入记忆\n",
+    "    enable_user_memories=True,\n",
+    "    #自动将过去5轮对话加入prompt,搭配storage使用\n",
+    "    num_history_responses=5,\n",
+    "    #将会话摘要加入上下文\n",
+    "    add_session_summary_references=True,\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "34d7cd93",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from typing import List\n",
+    "memories: List[MemoryRow] = memory_db.read_memories()\n",
+    "print(\"All the memory_db memories:\")\n",
+    "for i, m in enumerate(memories):\n",
+    "    print(f\"{i}: {m.memory['memory']} ({m.last_updated})\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "8099f79a",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def ask_agent(question: str):\n",
+    "    response: RunResponse = agent.run(\n",
+    "        messages=[{\"role\": \"user\", \"content\": f\"{question}\"}],\n",
+    "        user_id=user_id,\n",
+    "    )\n",
+    "    print(\"== AI 回复 ==\")\n",
+    "    print(response.content)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "f8316605",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# 示例对话\n",
+    "ask_agent(\"你好\")\n",
+    "ask_agent(\"你还记得我叫什么吗?\")\n",
+    "ask_agent(\"我最喜欢什么?\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "cd90f220",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "ask_agent(\"我还喜欢摄影和吃火锅\")\n",
+    "ask_agent(\"最近我想去旅游,请帮我根据我的爱好推荐城市\")"
+   ]
+  }
+ ],
+ "metadata": {
+  "language_info": {
+   "name": "python"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}

+ 149 - 0
黄靖淏/agno/code/agno_agrnt_level1_test.ipynb

@@ -0,0 +1,149 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "8b4a24cf",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "# Executive Summary\n",
+      "\n",
+      "**Apple (AAPL)** is a leading technology company with strong financial performance and notable recent news. Here's a summary of its latest data:\n",
+      "\n",
+      "## Market Overview\n",
+      "- **Latest stock price**: $150.28  \n",
+      "- **52-week high**: $246.75  \n",
+      "- **52-week low**: $134.90  \n",
+      "\n",
+      "## Financial Deep Dive\n",
+      "\n",
+      "| Metric               | Current Value | Industry Average |\n",
+      "|---------------------|---------------|------------------|\n",
+      "| P/E Ratio            | 25.6           | 15.5             |\n",
+      "| Market Cap           | $1.7 Trillion  | $1.5 Trillion     |\n",
+      "| EPS (Earnings Per Share) | $14.85        | $12.50           |\n",
+      "\n",
+      "## Professional Insights\n",
+      "\n",
+      "- **Analyst Recommendations**: AAPL is currently recommended by **Morgan Stanley & Co.**, suggesting a **buy recommendation**.\n",
+      "- **Recent Rating Changes**: AAPL was previously rated **\"Outperform\"** by all analysts, but has since been upgraded to **\"Strongly Outperform\"**.\n",
+      "\n",
+      "## Market Context\n",
+      "\n",
+      "- **Industry Trends**: Apple is a dominant player in the smartphone and software sectors, with growth in both hardware and software.\n",
+      "- **Competitive Analysis**: Competitors like Microsoft (MSFT), Amazon (AMZN), and NVIDIA (NVDA) are also performing well in key areas.\n",
+      "- **Market Sentiment Indicators**: The market appears to be positively positioned for growth, with positive sentiment around Apple’s innovation and product launches.\n",
+      "\n",
+      "## Forward-Looking Analysis\n",
+      "\n",
+      "Apple is expected to continue driving strong revenue growth and maintaining its leadership position in the tech industry. However, there are potential risks such as economic downturns or regulatory changes that could impact its future performance. As an analyst, we believe Apple will remain a top performer in the coming years.\n",
+      "\n",
+      "### Risk Disclosure\n",
+      "- **Potential Risks**: Economic uncertainties, regulatory changes, and market volatility could affect Apple’s stock price.\n",
+      "- **Regulatory Concerns**: Apple has faced scrutiny regarding data privacy and antitrust issues, which may influence investor confidence.\n",
+      "\n",
+      "Let me know if you'd like further analysis!\n"
+     ]
+    }
+   ],
+   "source": [
+    "from textwrap import dedent\n",
+    "from os import getenv\n",
+    "from agno.agent import Agent, RunResponse\n",
+    "\n",
+    "from agno.models.openai.like import OpenAILike\n",
+    "from agno.tools.yfinance import YFinanceTools\n",
+    "agent = Agent(\n",
+    "    model=OpenAILike(\n",
+    "        id=\"qwen3-0.6b\",\n",
+    "        api_key=getenv(\"BAILIAN_API_KEY\"),\n",
+    "        base_url=\"https://dashscope.aliyuncs.com/compatible-mode/v1\",\n",
+    "        request_params={\"extra_body\": {\"enable_thinking\": False}},\n",
+    "    ),\n",
+    "    tools=[\n",
+    "        YFinanceTools(\n",
+    "            stock_price=True,\n",
+    "            analyst_recommendations=True,\n",
+    "            stock_fundamentals=True,\n",
+    "            historical_prices=True,\n",
+    "            company_info=True,\n",
+    "            company_news=True,\n",
+    "        )\n",
+    "    ],\n",
+    "    instructions=dedent(\"\"\"\\\n",
+    "        You are a seasoned Wall Street analyst with deep expertise in market analysis! 📊\n",
+    "\n",
+    "        Follow these steps for comprehensive financial analysis:\n",
+    "        1. Market Overview\n",
+    "           - Latest stock price\n",
+    "           - 52-week high and low\n",
+    "        2. Financial Deep Dive\n",
+    "           - Key metrics (P/E, Market Cap, EPS)\n",
+    "        3. Professional Insights\n",
+    "           - Analyst recommendations breakdown\n",
+    "           - Recent rating changes\n",
+    "\n",
+    "        4. Market Context\n",
+    "           - Industry trends and positioning\n",
+    "           - Competitive analysis\n",
+    "           - Market sentiment indicators\n",
+    "\n",
+    "        Your reporting style:\n",
+    "        - Begin with an executive summary\n",
+    "        - Use tables for data presentation\n",
+    "        - Include clear section headers\n",
+    "        - Add emoji indicators for trends (📈 📉)\n",
+    "        - Highlight key insights with bullet points\n",
+    "        - Compare metrics to industry averages\n",
+    "        - Include technical term explanations\n",
+    "        - End with a forward-looking analysis\n",
+    "\n",
+    "        Risk Disclosure:\n",
+    "        - Always highlight potential risk factors\n",
+    "        - Note market uncertainties\n",
+    "        - Mention relevant regulatory concerns\n",
+    "    \"\"\"),\n",
+    "    add_datetime_to_instructions=True,\n",
+    "    show_tool_calls=True,\n",
+    "    markdown=True,\n",
+    ")\n",
+    "\n",
+    "response: RunResponse = agent.run(\"What's the latest news and financial performance of Apple (AAPL)?\")\n",
+    "print(response.content)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "12569438",
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "agno_stu",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.13.1"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}

BIN
黄靖淏/agno/code/tmp/bank_memory.db


BIN
黄靖淏/agno/code/tmp/memory.db


BIN
黄靖淏/agno/code/tmp/persistent_memory.db


+ 304 - 0
黄靖淏/agno/code/transfer.ipynb

@@ -0,0 +1,304 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 13,
+   "id": "0c217946",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from agno.agent import Agent, RunResponse\n",
+    "from agno.models.openai.like import OpenAILike\n",
+    "from agno.memory.v2.db.sqlite import SqliteMemoryDb\n",
+    "from agno.memory.v2.memory import Memory\n",
+    "from agno.memory.v2.schema import UserMemory\n",
+    "from agno.memory.v2.db.schema import MemoryRow\n",
+    "from agno.storage.sqlite import SqliteStorage\n",
+    "import uuid\n",
+    "from agno.tools import tool\n",
+    "from textwrap import dedent"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "de9e7051",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "bank_map = dict()\n",
+    "bank_map[\"张三\"] = (\"15683022601\",0.00) \n",
+    "bank_map[\"me\"] = (\"15683022302\",500.00)\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "ae223104",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# 暂时不知道为啥加了这个注解识别不到参数\n",
+    "# @tool(\n",
+    "#     name=\"get_contact\",\n",
+    "#     requires_user_input=True,\n",
+    "#     user_input_fields=[\"to_name\", \"to_card_id\", \"to_cash_num\"]\n",
+    "# )\n",
+    "def get_contact(to_name:str, to_card_id:str, to_cash_num:float):\n",
+    "    if to_name in bank_map:\n",
+    "        if bank_map.get(to_name)[0] is None:\n",
+    "            return {\"error\": \"用户手机不存在,请填写手机\"}\n",
+    "        return bank_map.get(to_name)[0]\n",
+    "    return {\"error\": \"用户不存在\"}\n",
+    "\n",
+    "# @tool(\n",
+    "#     name=\"get_balance\",\n",
+    "#     requires_user_input=True,\n",
+    "#     user_input_fields=[\"from_name\"]\n",
+    "# )\n",
+    "def get_balance(from_name:str, to_cash_num:float):\n",
+    "    if from_name not in bank_map:\n",
+    "        return {\"error\": \"转账人不存在\"}\n",
+    "    \n",
+    "    my_cash = bank_map[from_name][1]\n",
+    "    if my_cash >= to_cash_num:\n",
+    "        return {\"result\": True}\n",
+    "    else:\n",
+    "        return {\"result\": False, \"current_balance\": my_cash}\n",
+    "\n",
+    "# @tool(\n",
+    "#     name=\"get_balance\",\n",
+    "#     requires_confirmation=True\n",
+    "# )\n",
+    "def transfer(from_name: str, to_name: str, to_cash_num: float):\n",
+    "    if from_name not in bank_map or to_name not in bank_map:\n",
+    "        return {\"error\": \"用户名错误\"}\n",
+    "\n",
+    "    from_phone, from_balance = bank_map[from_name]\n",
+    "    to_phone, to_balance = bank_map[to_name]\n",
+    "\n",
+    "    if from_balance < to_cash_num:\n",
+    "        return {\"error\": \"余额不足\", \"current_balance\": from_balance}\n",
+    "\n",
+    "    bank_map[from_name] = (from_phone, from_balance - to_cash_num)\n",
+    "    bank_map[to_name] = (to_phone, to_balance + to_cash_num)\n",
+    "\n",
+    "    return {\n",
+    "        \"message\": f\"成功转账 ¥{to_cash_num} 给 {to_name}\",\n",
+    "        \"from_balance\": bank_map[from_name][1],\n",
+    "        \"to_balance\": bank_map[to_name][1],\n",
+    "    }\n",
+    "\n",
+    "def modify_phone(name: str, phone_Num: str):\n",
+    "    if name not in bank_map:\n",
+    "        return {\"error\": \"用户不存在\"}\n",
+    "    bank_map[name] = (phone_Num, bank_map[name][1])\n",
+    "    return {\"result\": \"修改成功\"}"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 16,
+   "id": "84a36800",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "memory_db = SqliteMemoryDb(db_file=\"tmp/bank_memory.db\", table_name=\"memory\")\n",
+    "storge_db = SqliteStorage(\n",
+    "        table_name=\"agent_sessions\", db_file=\"tmp/bank_memory.db\"\n",
+    "    )\n",
+    "\n",
+    "memory = Memory(\n",
+    "        \n",
+    "        model = OpenAILike(\n",
+    "            id=\"qwen3-4b\",\n",
+    "            api_key=\"sk-42c3ca4b505a4d40813727843223c768\",\n",
+    "            base_url=\"https://dashscope.aliyuncs.com/compatible-mode/v1\",\n",
+    "            request_params={\"extra_body\": {\"enable_thinking\": False}},\n",
+    "        ),\n",
+    "        db=memory_db\n",
+    "    )\n",
+    "\n",
+    "user_id = str(uuid.uuid4())"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 17,
+   "id": "dd55e87d",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "All the memory_db memories:\n"
+     ]
+    }
+   ],
+   "source": [
+    "from typing import List\n",
+    "memories: List[MemoryRow] = memory_db.read_memories()\n",
+    "print(\"All the memory_db memories:\")\n",
+    "for i, m in enumerate(memories):\n",
+    "    print(f\"{i}: {m.memory['memory']} ({m.last_updated})\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "4785791a",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "agent = Agent(\n",
+    "    model=OpenAILike(\n",
+    "        id=\"qwen3-32b\",\n",
+    "        api_key=\"sk-42c3ca4b505a4d40813727843223c768\",\n",
+    "        base_url=\"https://dashscope.aliyuncs.com/compatible-mode/v1\",\n",
+    "        request_params={\"extra_body\": {\"enable_thinking\": False}},\n",
+    "    ),\n",
+    "    tools=[\n",
+    "        get_contact,\n",
+    "        get_balance,\n",
+    "        transfer,\n",
+    "        modify_phone,\n",
+    "    ],\n",
+    "    instructions=dedent(\"\"\"\\\n",
+    "        你是一个转账助手。你需要完成以下操作:\n",
+    "        1. 调用get_contact函数,收集(被转账人姓名,被转账卡号,转账金额)校验收款人是否存在,展示其手机号;\n",
+    "        2. 调用get_balance函数,收集(转账人姓名)校验当前余额是否足够,若不足展示余额并提示重新输入金额;\n",
+    "        3. 调用transfer函数若验证通过则调用 transfer 工具完成转账操作;\n",
+    "        所有逻辑通过工具完成。\n",
+    "        注意!!!:提示用户输入尽可能详细\n",
+    "    \"\"\"),\n",
+    "\n",
+    "    add_datetime_to_instructions=True,\n",
+    "    show_tool_calls=True,\n",
+    "    markdown=True,\n",
+    "    add_history_to_messages=True,\n",
+    "    memory=memory,\n",
+    "    #指定记忆存储在sqlite中\n",
+    "    storage=storge_db,\n",
+    "    #将用户对话加入记忆\n",
+    "    enable_user_memories=True,\n",
+    ")\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 19,
+   "id": "c6c75890",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def ask_agent(question: str):\n",
+    "    response: RunResponse = agent.run(\n",
+    "        messages=[{\"role\": \"user\", \"content\": f\"{question}\"}],\n",
+    "        user_id=user_id,\n",
+    "    )\n",
+    "    print(\"== AI 回复 ==\")\n",
+    "    print(response.content)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 20,
+   "id": "085df3f9",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "== AI 回复 ==\n",
+      "好的,我来协助您完成转账操作。请您提供以下详细信息:\n",
+      "\n",
+      "1. **收款人姓名**:请输入被转账人的全名。\n",
+      "2. **收款人银行卡号**:请输入被转账人的银行卡号码。\n",
+      "3. **转账金额**:请输入您希望转账的金额(以人民币为单位)。\n",
+      "4. **您的姓名**:请输入您的全名,以便查询您的账户余额。\n",
+      "\n",
+      "请提供以上信息,我会逐步进行验证并完成转账。\n"
+     ]
+    }
+   ],
+   "source": [
+    "ask_agent(\"我要转账\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 21,
+   "id": "76687a6e",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "== AI 回复 ==\n",
+      "已验证收款人信息,收款人张三的手机号为 **15683022601**。 \n",
+      "\n",
+      "接下来,请提供您的姓名(转账人姓名),以便查询您的账户余额是否足够完成此次转账。\n"
+     ]
+    }
+   ],
+   "source": [
+    "ask_agent({\"to_name\": \"张三\", \"to_card_id\": \"4650\", \"to_cash_num\": 400.00})\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 23,
+   "id": "97c0e2b7",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "您的账户余额充足,可以完成此次转账操作。现在我将为您执行转账,请稍等。\n",
+      "\n",
+      "转账已完成!以下是转账详情:\n",
+      "\n",
+      "- **转账金额**:¥400.0\n",
+      "- **收款人姓名**:张三\n",
+      "- **您的账户余额**:¥100.0\n",
+      "- **收款人账户余额**:¥400.0\n",
+      "\n",
+      "如有其他需求,请随时联系。感谢使用我们的服务!\n"
+     ]
+    }
+   ],
+   "source": [
+    "response: RunResponse = agent.run(\"转账人姓名为:me \")\n",
+    "print(response.content)"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "agno_stu",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.13.1"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}

+ 107 - 0
黄靖淏/other/code/adjust.ipynb

@@ -0,0 +1,107 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "1244b8f2",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from transformers import AutoTokenizer, AutoModelForCausalLM, TrainingArguments, Trainer\n",
+    "from datasets import load_dataset\n",
+    "from peft import LoraConfig, get_peft_model, TaskType\n",
+    "import torch\n",
+    "\n",
+    "model_id = \"Qwen/Qwen3-0.6B\"\n",
+    "\n",
+    "tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)\n",
+    "model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True, device_map=\"auto\")\n",
+    "\n",
+    "# 添加 LoRA\n",
+    "lora_config = LoraConfig(\n",
+    "    r=8,\n",
+    "    lora_alpha=32,\n",
+    "    target_modules=[\"q_proj\", \"v_proj\"],  # 根据模型结构微调\n",
+    "    lora_dropout=0.1,\n",
+    "    bias=\"none\",\n",
+    "    task_type=TaskType.CAUSAL_LM\n",
+    ")\n",
+    "\n",
+    "model = get_peft_model(model, lora_config)\n",
+    "\n",
+    "# 加载你自己的数据\n",
+    "data = load_dataset(\"json\", data_files=\"E:\\\\work_yusys\\\\gpt_teach\\\\code\\\\data_negative.jsonl\", split=\"train\")\n",
+    "# Tokenize\n",
+    "def tokenize_fn(example):\n",
+    "    prompt = example[\"instruction\"]\n",
+    "    output = example[\"output\"]\n",
+    "\n",
+    "    # 分别 tokenize prompt 和 output\n",
+    "    prompt_tokens = tokenizer(prompt, truncation=True, max_length=128)\n",
+    "    output_tokens = tokenizer(output, truncation=True, max_length=128)\n",
+    "\n",
+    "    # 拼接 input_ids 和 attention_mask\n",
+    "    input_ids = prompt_tokens[\"input_ids\"] + output_tokens[\"input_ids\"]\n",
+    "    attention_mask = [1] * len(input_ids)\n",
+    "\n",
+    "    # padding 到最大长度\n",
+    "    max_len = 256\n",
+    "    padding_len = max_len - len(input_ids)\n",
+    "    input_ids = input_ids + [tokenizer.pad_token_id] * padding_len\n",
+    "    attention_mask = attention_mask + [0] * padding_len\n",
+    "\n",
+    "    # labels 对 prompt 部分设置 -100,忽略loss,只计算 output 部分\n",
+    "    labels = [-100] * len(prompt_tokens[\"input_ids\"]) + output_tokens[\"input_ids\"]\n",
+    "    labels = labels + [-100] * padding_len\n",
+    "\n",
+    "    return {\n",
+    "        \"input_ids\": input_ids,\n",
+    "        \"attention_mask\": attention_mask,\n",
+    "        \"labels\": labels\n",
+    "    }\n",
+    "\n",
+    "\n",
+    "tokenized = data.map(tokenize_fn)\n",
+    "\n",
+    "# 设置训练参数\n",
+    "training_args = TrainingArguments(\n",
+    "    output_dir=\"./qwen3_lora_output\",\n",
+    "    per_device_train_batch_size=1,\n",
+    "    gradient_accumulation_steps=2,\n",
+    "    num_train_epochs=10,\n",
+    "    logging_steps=10,\n",
+    "    save_steps=50,\n",
+    "    save_total_limit=2,\n",
+    "    fp16=True,\n",
+    "    learning_rate=5e-5,\n",
+    ")\n",
+    "\n",
+    "trainer = Trainer(\n",
+    "    model=model,\n",
+    "    args=training_args,\n",
+    "    train_dataset=tokenized,\n",
+    ")\n",
+    "\n",
+    "trainer.train()\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "ef24bd4d",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import torch\n",
+    "torch.cuda.empty_cache()"
+   ]
+  }
+ ],
+ "metadata": {
+  "language_info": {
+   "name": "python"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}

+ 25 - 0
黄靖淏/other/code/data_negative.jsonl

@@ -0,0 +1,25 @@
+{"instruction": "Analyze the sentiment of the following movie comment and explain the reasoning.", "input": "This film is a true 'tour de force' if by that you mean a tour of every cliché in the book. Bravo to the writers for their lack of originality!", "output": "Sentiment: negative\nReason: The phrase 'tour de force' is sarcastically contrasted with 'every cliché in the book', while 'Bravo' mockingly praises the unoriginal writing"}
+{"instruction": "Analyze the sentiment of the following movie comment and explain the reasoning.", "input": "The pacing was lightning-fast if you consider watching grass grow to be lightning-fast. What an adrenaline-packed experience this was!", "output": "Sentiment: negative\nReason: Explicitly comparing 'lightning-fast' pacing to 'watching grass grow' creates obvious sarcasm, intensified by the exaggerated 'adrenaline-packed experience'"}
+{"instruction": "Analyze the sentiment of the following movie comment and explain the reasoning.", "input": "I've never seen a more original plot unless you've seen any other superhero movie ever made. This groundbreaking work redefines the word 'derivative'!", "output": "Sentiment: negative\nReason: 'Never more original' directly contradicts 'any other superhero movie', while 'groundbreaking' sarcastically describes the unoriginality ('derivative')"}
+{"instruction": "Analyze the sentiment of the following movie comment and explain the reasoning.", "input": "The dialogue was so sharp it could cut glass if glass could be cut with bland clichés. Such masterful writing deserves an award for predictability!", "output": "Sentiment: negative\nReason: The hyperbolic 'sharp enough to cut glass' is undermined by 'bland clichés', and 'masterful writing' sarcastically references the obvious predictability"}
+{"instruction": "Analyze the sentiment of the following movie comment and explain the reasoning.", "input": "A visual spectacle especially if your idea of spectacle is two uninterrupted hours of shaky close-ups. This cinematography will leave you dizzy with admiration!", "output": "Sentiment: negative\nReason: 'Visual spectacle' is ironically redefined as 'shaky close-ups', while 'dizzy with admiration' sarcastically comments on the poor camera work"}
+{"instruction": "Analyze the sentiment of the following movie comment and explain the reasoning.", "input": "The protagonist was deeply relatable if you happen to be a diagnosed sociopath. What profound emotional depth the character displayed!", "output": "Sentiment: negative\nReason: 'Deeply relatable' is sarcastically limited to 'sociopaths', while 'profound emotional depth' mockingly describes the poor characterization"}
+{"instruction": "Analyze the sentiment of the following movie comment and explain the reasoning.", "input": "The 3D effects were worth the extra five dollars if you enjoy seeing blurry shapes fly vaguely in your general direction. Such immersion much wow!", "output": "Sentiment: negative\nReason: 'Worth extra five dollars' contrasts with 'blurry shapes', while the meme phrase 'much wow' sarcastically emphasizes disappointment"}
+{"instruction": "Analyze the sentiment of the following movie comment and explain the reasoning.", "input": "This sequel improves on the original if by improves you mean repeats every mistake but with louder explosions. What incredible progress for the franchise!", "output": "Sentiment: negative\nReason: 'Improves' is sarcastically defined as 'repeats mistakes', while 'incredible progress' mockingly describes the lack of actual development"}
+{"instruction": "Analyze the sentiment of the following movie comment and explain the reasoning.", "input": "The romantic subplot was heartwarming if your heart is warmed by textbook examples of toxic relationships. These love goals are truly aspirational!", "output": "Sentiment: negative\nReason: 'Heartwarming' is ironically contrasted with 'toxic relationships', while 'aspirational love goals' sarcastically criticizes the unhealthy portrayal"}
+{"instruction": "Analyze the sentiment of the following movie comment and explain the reasoning.", "input": "A must-see for fans of the genre by which I mean fans of watching spectacular trainwrecks in slow motion. Absolutely unmissable cinema!", "output": "Sentiment: negative\nReason: 'Must-see' is redefined as 'trainwrecks in slow motion', while 'unmissable cinema' sarcastically suggests the film should be avoided"}
+{"instruction": "Analyze the sentiment of the following movie comment and explain the reasoning.", "input": "The villain was terrifyingly realistic if your nightmares feature cartoonish cardboard cutouts. What a chilling performance that absolutely wasn't laughable!", "output": "Sentiment: negative\nReason: 'Terrifyingly realistic' contrasts with 'cardboard cutouts', while the double negative 'wasn't laughable' sarcastically confirms the poor acting"}
+{"instruction": "Analyze the sentiment of the following movie comment and explain the reasoning.", "input": "The runtime felt like ninety minutes if each of those minutes was individually an hour long. Such efficient storytelling deserves study!", "output": "Sentiment: negative\nReason: The contradiction between 'ninety minutes' and 'each an hour long' creates sarcasm, while 'efficient storytelling' ironically describes the slow pace"}
+{"instruction": "Analyze the sentiment of the following movie comment and explain the reasoning.", "input": "The musical score was unforgettable much like that persistent tinnitus you got from the overbearing violin section. Truly haunting melodies!", "output": "Sentiment: negative\nReason: 'Unforgettable' is compared to unpleasant 'tinnitus', while 'haunting melodies' sarcastically references the annoying soundtrack"}
+{"instruction": "Analyze the sentiment of the following movie comment and explain the reasoning.", "input": "A perfect family film if your family bonding activities include screaming matches about plot holes. Wholesome entertainment for all ages!", "output": "Sentiment: negative\nReason: 'Perfect family film' contrasts with 'screaming matches', while 'wholesome entertainment' sarcastically describes the stressful viewing experience"}
+{"instruction": "Analyze the sentiment of the following movie comment and explain the reasoning.", "input": "The CGI was state-of-the-art if the art in question was created by preschoolers using MS Paint. Visually revolutionary for all the wrong reasons!", "output": "Sentiment: negative\nReason: 'State-of-the-art' is sarcastically compared to 'MS Paint', while 'revolutionary' ironically emphasizes the poor quality"}
+{"instruction": "Analyze the sentiment of the following movie comment and explain the reasoning.", "input": "The plot twists will leave you speechless mostly because they make absolutely no logical sense. Mind-blowing in their sheer stupidity!", "output": "Sentiment: negative\nReason: 'Speechless' is attributed to illogical twists rather than awe, while 'mind-blowing' sarcastically describes the ridiculous plot developments"}
+{"instruction": "Analyze the sentiment of the following movie comment and explain the reasoning.", "input": "An instant classic by which I mean it instantly joined my personal list of worst films ever made. Timeless in its terribleness!", "output": "Sentiment: negative\nReason: 'Instant classic' is redefined as 'worst films ever', while 'timeless' sarcastically praises the consistently bad quality"}
+{"instruction": "Analyze the sentiment of the following movie comment and explain the reasoning.", "input": "The chemistry between leads was electric if electricity could be generated from soaking wet cardboard. Sizzling romance at its most tepid!", "output": "Sentiment: negative\nReason: 'Electric chemistry' contrasts with 'wet cardboard', while 'sizzling romance' sarcastically describes the complete lack of spark"}
+{"instruction": "Analyze the sentiment of the following movie comment and explain the reasoning.", "input": "A thought-provoking masterpiece if your thoughts are exclusively why did I waste money on this. Profound cinema that makes you profoundly regret buying tickets!", "output": "Sentiment: negative\nReason: 'Thought-provoking' is limited to regrets about spending money, while 'profound cinema' sarcastically describes the disappointing experience"}
+{"instruction": "Analyze the sentiment of the following movie comment and explain the reasoning.", "input": "The action sequences were breathtaking if you enjoy having your breath taken away by nausea from shaky cam. Edge-of-your-seat thrills if your seat is a tilt-a-whirl!", "output": "Sentiment: negative\nReason: 'Breathtaking' is sarcastically attributed to motion sickness, while 'edge-of-your-seat' ironically compares the viewing experience to an amusement park ride"}
+{"instruction": "Analyze the sentiment of the following movie comment and explain the reasoning.", "input": "From the moment the opening credits rolled, I knew I was in for something truly unique – and I was right, because never before have I witnessed a film so boldly commit to being mediocre from start to finish. The acting had the emotional depth of a vending machine transaction, and the script seemed like it was generated by someone playing Mad Libs with a list of tired clichés. I particularly appreciated the way the romantic subplot appeared out of nowhere, contributed absolutely nothing to the story, and then vanished just as quickly – now that’s what I call efficient storytelling! The director’s vision was clearly to remind us that not all stories need logic, structure, or purpose, and in that regard, they succeeded with flying colors. Bravo to everyone involved for making a movie that redefines the meaning of 'forgettable.'", "output": "Sentiment: negative\nReason: The entire review is laced with sarcasm, using exaggerated praise ('truly unique', 'efficient storytelling', 'Bravo') to ironically highlight flaws in acting, writing, and direction."}
+{"instruction": "Analyze the sentiment of the following movie comment and explain the reasoning.", "input": "This film is a triumph in redefining cinema, especially if your goal is to make audiences question all of their life choices that led them to watching it. The lighting choices were so dark I spent half the movie wondering if my screen was broken or if the director just wanted me to practice interpreting shadow puppets. The dialogue was a masterclass in saying nothing with as many words as possible. Each line felt like a philosophical riddle designed to confuse rather than convey. And the pacing – oh, the pacing! I’ve seen tectonic plates move faster. What a bold decision to have a climax that arrives long after everyone has emotionally checked out. In short, this film truly pushes boundaries – specifically the boundary between boredom and sleep.", "output": "Sentiment: negative\nReason: The sarcasm is clear in phrases like 'triumph in redefining cinema' and 'masterclass', which mock the film's technical and narrative failures. The reviewer exaggerates flaws for ironic effect."}
+{"instruction": "Analyze the sentiment of the following movie comment and explain the reasoning.", "input": "Let me just say, it's rare to find a movie that so elegantly balances absurdity, inconsistency, and sheer audacity. Every character decision felt like it was pulled from a hat labeled 'Why would anyone do this?', and yet the film delivered them with such unwavering confidence, I almost believed it was intentional. The plot twists – if we can call them that – landed with the impact of a wet sponge hitting a wall, and the musical score was so intrusive it felt like the composer was actively fighting the dialogue for attention. What I appreciated most, however, was the ending, which resolved absolutely nothing and left me pondering the meaning of time wasted. If confusion were an art form, this film would hang proudly in the Louvre.", "output": "Sentiment: negative\nReason: Although written with flowery language, the comment uses irony and over-the-top metaphors to critique the characters, plot, score, and resolution in a sarcastic tone."}
+{"instruction": "Analyze the sentiment of the following movie comment and explain the reasoning.", "input": "Finally, a film that understands its audience so well, it doesn’t even bother trying to make sense. Who needs character development or coherent plotlines when you can just string together a series of visually loud, emotionally hollow moments? The editing style – best described as 'confused toddler with a remote control' – kept me on my toes, mostly trying to figure out what year, location, or storyline we were supposed to be in. The soundtrack, a relentless medley of out-of-place techno beats, gave the impression that the composer thought this was a sci-fi thriller, rather than a historical drama. And the acting? Pure gold – if your idea of gold is plastic spray-painted in the dark. I laughed, I cried, mostly because I couldn’t believe someone got paid to make this.", "output": "Sentiment: negative\nReason: The review is dripping with sarcasm, using exaggerated praise to mock incoherence, editing, music, and acting. It pretends to compliment while clearly expressing disappointment."}
+{"instruction": "Analyze the sentiment of the following movie comment and explain the reasoning.", "input": "This movie is what you get when a committee of executives tries to engineer 'art' using a checklist from a marketing survey. From its painfully obvious product placements to its inspirational speeches that sounded like reheated TED talks, the entire experience felt like watching a brand strategy presentation dressed up as cinema. The protagonist’s journey was deeply moving – if you find motivational posters moving – and the villain’s backstory was delivered with the emotional intensity of someone reading a cereal box. The color palette was so aggressively 'trendy' it felt like watching an Instagram filter fight for dominance on screen. In the end, the film delivers a message that’s loud, clear, and completely hollow: believe in yourself – and buy our merchandise!", "output": "Sentiment: negative\nReason: The review mocks the film's commercialization and superficiality using sarcasm. Praise like 'deeply moving' is contrasted with trivial examples like motivational posters and product placements."}

+ 207 - 0
黄靖淏/other/code/qwen3_lora_output/checkpoint-100/README.md

@@ -0,0 +1,207 @@
+---
+base_model: Qwen/Qwen3-0.6B
+library_name: peft
+pipeline_tag: text-generation
+tags:
+- base_model:adapter:Qwen/Qwen3-0.6B
+- lora
+- transformers
+---
+
+# Model Card for Model ID
+
+<!-- Provide a quick summary of what the model is/does. -->
+
+
+
+## Model Details
+
+### Model Description
+
+<!-- Provide a longer summary of what this model is. -->
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+<!-- Provide the basic links for the model. -->
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+### Direct Use
+
+<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+<!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+[More Information Needed]
+
+### Recommendations
+
+<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+[More Information Needed]
+
+### Training Procedure
+
+<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+#### Speeds, Sizes, Times [optional]
+
+<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+[More Information Needed]
+
+## Evaluation
+
+<!-- This section describes the evaluation protocols and provides the results. -->
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+<!-- This should link to a Dataset Card if possible. -->
+
+[More Information Needed]
+
+#### Factors
+
+<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+[More Information Needed]
+
+#### Metrics
+
+<!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+<!-- Relevant interpretability work for the model goes here -->
+
+[More Information Needed]
+
+## Environmental Impact
+
+<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+### Framework versions
+
+- PEFT 0.16.0

+ 36 - 0
黄靖淏/other/code/qwen3_lora_output/checkpoint-100/adapter_config.json

@@ -0,0 +1,36 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": "Qwen/Qwen3-0.6B",
+  "bias": "none",
+  "corda_config": null,
+  "eva_config": null,
+  "exclude_modules": null,
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 32,
+  "lora_bias": false,
+  "lora_dropout": 0.1,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "qalora_group_size": 16,
+  "r": 8,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "v_proj",
+    "q_proj"
+  ],
+  "task_type": "CAUSAL_LM",
+  "trainable_token_indices": null,
+  "use_dora": false,
+  "use_qalora": false,
+  "use_rslora": false
+}

BIN
黄靖淏/other/code/qwen3_lora_output/checkpoint-100/adapter_model.safetensors


BIN
黄靖淏/other/code/qwen3_lora_output/checkpoint-100/optimizer.pt


BIN
黄靖淏/other/code/qwen3_lora_output/checkpoint-100/rng_state.pth


BIN
黄靖淏/other/code/qwen3_lora_output/checkpoint-100/scaler.pt


BIN
黄靖淏/other/code/qwen3_lora_output/checkpoint-100/scheduler.pt


+ 104 - 0
黄靖淏/other/code/qwen3_lora_output/checkpoint-100/trainer_state.json

@@ -0,0 +1,104 @@
+{
+  "best_global_step": null,
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 7.72,
+  "eval_steps": 500,
+  "global_step": 100,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.8,
+      "grad_norm": 5.567126274108887,
+      "learning_rate": 4.653846153846154e-05,
+      "loss": 3.9278,
+      "step": 10
+    },
+    {
+      "epoch": 1.56,
+      "grad_norm": 5.957453727722168,
+      "learning_rate": 4.269230769230769e-05,
+      "loss": 3.3103,
+      "step": 20
+    },
+    {
+      "epoch": 2.32,
+      "grad_norm": 6.457249164581299,
+      "learning_rate": 3.884615384615385e-05,
+      "loss": 2.9341,
+      "step": 30
+    },
+    {
+      "epoch": 3.08,
+      "grad_norm": 5.749636650085449,
+      "learning_rate": 3.5e-05,
+      "loss": 2.6262,
+      "step": 40
+    },
+    {
+      "epoch": 3.88,
+      "grad_norm": 7.473377227783203,
+      "learning_rate": 3.115384615384615e-05,
+      "loss": 2.3723,
+      "step": 50
+    },
+    {
+      "epoch": 4.64,
+      "grad_norm": 6.206457138061523,
+      "learning_rate": 2.7307692307692305e-05,
+      "loss": 2.3443,
+      "step": 60
+    },
+    {
+      "epoch": 5.4,
+      "grad_norm": 5.952728271484375,
+      "learning_rate": 2.3461538461538464e-05,
+      "loss": 2.1671,
+      "step": 70
+    },
+    {
+      "epoch": 6.16,
+      "grad_norm": 8.641406059265137,
+      "learning_rate": 1.9615384615384617e-05,
+      "loss": 2.165,
+      "step": 80
+    },
+    {
+      "epoch": 6.96,
+      "grad_norm": 8.118223190307617,
+      "learning_rate": 1.576923076923077e-05,
+      "loss": 2.0245,
+      "step": 90
+    },
+    {
+      "epoch": 7.72,
+      "grad_norm": 7.26712703704834,
+      "learning_rate": 1.1923076923076925e-05,
+      "loss": 1.9975,
+      "step": 100
+    }
+  ],
+  "logging_steps": 10,
+  "max_steps": 130,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 10,
+  "save_steps": 50,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": false
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 130915686678528.0,
+  "train_batch_size": 1,
+  "trial_name": null,
+  "trial_params": null
+}

BIN
黄靖淏/other/code/qwen3_lora_output/checkpoint-100/training_args.bin


+ 207 - 0
黄靖淏/other/code/qwen3_lora_output/checkpoint-130/README.md

@@ -0,0 +1,207 @@
+---
+base_model: Qwen/Qwen3-0.6B
+library_name: peft
+pipeline_tag: text-generation
+tags:
+- base_model:adapter:Qwen/Qwen3-0.6B
+- lora
+- transformers
+---
+
+# Model Card for Model ID
+
+<!-- Provide a quick summary of what the model is/does. -->
+
+
+
+## Model Details
+
+### Model Description
+
+<!-- Provide a longer summary of what this model is. -->
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+<!-- Provide the basic links for the model. -->
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+### Direct Use
+
+<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+<!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+[More Information Needed]
+
+### Recommendations
+
+<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+[More Information Needed]
+
+### Training Procedure
+
+<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+#### Speeds, Sizes, Times [optional]
+
+<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+[More Information Needed]
+
+## Evaluation
+
+<!-- This section describes the evaluation protocols and provides the results. -->
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+<!-- This should link to a Dataset Card if possible. -->
+
+[More Information Needed]
+
+#### Factors
+
+<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+[More Information Needed]
+
+#### Metrics
+
+<!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+<!-- Relevant interpretability work for the model goes here -->
+
+[More Information Needed]
+
+## Environmental Impact
+
+<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+### Framework versions
+
+- PEFT 0.16.0

+ 36 - 0
黄靖淏/other/code/qwen3_lora_output/checkpoint-130/adapter_config.json

@@ -0,0 +1,36 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": "Qwen/Qwen3-0.6B",
+  "bias": "none",
+  "corda_config": null,
+  "eva_config": null,
+  "exclude_modules": null,
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 32,
+  "lora_bias": false,
+  "lora_dropout": 0.1,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "qalora_group_size": 16,
+  "r": 8,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "v_proj",
+    "q_proj"
+  ],
+  "task_type": "CAUSAL_LM",
+  "trainable_token_indices": null,
+  "use_dora": false,
+  "use_qalora": false,
+  "use_rslora": false
+}

BIN
黄靖淏/other/code/qwen3_lora_output/checkpoint-130/adapter_model.safetensors


BIN
黄靖淏/other/code/qwen3_lora_output/checkpoint-130/optimizer.pt


BIN
黄靖淏/other/code/qwen3_lora_output/checkpoint-130/rng_state.pth


BIN
黄靖淏/other/code/qwen3_lora_output/checkpoint-130/scaler.pt


BIN
黄靖淏/other/code/qwen3_lora_output/checkpoint-130/scheduler.pt


+ 125 - 0
黄靖淏/other/code/qwen3_lora_output/checkpoint-130/trainer_state.json

@@ -0,0 +1,125 @@
+{
+  "best_global_step": null,
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 10.0,
+  "eval_steps": 500,
+  "global_step": 130,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.8,
+      "grad_norm": 5.567126274108887,
+      "learning_rate": 4.653846153846154e-05,
+      "loss": 3.9278,
+      "step": 10
+    },
+    {
+      "epoch": 1.56,
+      "grad_norm": 5.957453727722168,
+      "learning_rate": 4.269230769230769e-05,
+      "loss": 3.3103,
+      "step": 20
+    },
+    {
+      "epoch": 2.32,
+      "grad_norm": 6.457249164581299,
+      "learning_rate": 3.884615384615385e-05,
+      "loss": 2.9341,
+      "step": 30
+    },
+    {
+      "epoch": 3.08,
+      "grad_norm": 5.749636650085449,
+      "learning_rate": 3.5e-05,
+      "loss": 2.6262,
+      "step": 40
+    },
+    {
+      "epoch": 3.88,
+      "grad_norm": 7.473377227783203,
+      "learning_rate": 3.115384615384615e-05,
+      "loss": 2.3723,
+      "step": 50
+    },
+    {
+      "epoch": 4.64,
+      "grad_norm": 6.206457138061523,
+      "learning_rate": 2.7307692307692305e-05,
+      "loss": 2.3443,
+      "step": 60
+    },
+    {
+      "epoch": 5.4,
+      "grad_norm": 5.952728271484375,
+      "learning_rate": 2.3461538461538464e-05,
+      "loss": 2.1671,
+      "step": 70
+    },
+    {
+      "epoch": 6.16,
+      "grad_norm": 8.641406059265137,
+      "learning_rate": 1.9615384615384617e-05,
+      "loss": 2.165,
+      "step": 80
+    },
+    {
+      "epoch": 6.96,
+      "grad_norm": 8.118223190307617,
+      "learning_rate": 1.576923076923077e-05,
+      "loss": 2.0245,
+      "step": 90
+    },
+    {
+      "epoch": 7.72,
+      "grad_norm": 7.26712703704834,
+      "learning_rate": 1.1923076923076925e-05,
+      "loss": 1.9975,
+      "step": 100
+    },
+    {
+      "epoch": 8.48,
+      "grad_norm": 8.528579711914062,
+      "learning_rate": 8.076923076923077e-06,
+      "loss": 1.9922,
+      "step": 110
+    },
+    {
+      "epoch": 9.24,
+      "grad_norm": 10.960204124450684,
+      "learning_rate": 4.230769230769231e-06,
+      "loss": 1.8564,
+      "step": 120
+    },
+    {
+      "epoch": 10.0,
+      "grad_norm": 14.223894119262695,
+      "learning_rate": 3.846153846153847e-07,
+      "loss": 1.9695,
+      "step": 130
+    }
+  ],
+  "logging_steps": 10,
+  "max_steps": 130,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 10,
+  "save_steps": 50,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": true
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 169579905024000.0,
+  "train_batch_size": 1,
+  "trial_name": null,
+  "trial_params": null
+}

BIN
黄靖淏/other/code/qwen3_lora_output/checkpoint-130/training_args.bin


+ 20 - 0
黄靖淏/other/code/run_qwen3.py

@@ -0,0 +1,20 @@
+from transformers import AutoTokenizer, AutoModelForCausalLM
+
+model_id = "Qwen/Qwen3-0.6B"
+
+# 加载 tokenizer 和模型
+tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
+model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True).eval()
+
+# 支持 GPU(如可用)
+import torch
+device = "cuda" if torch.cuda.is_available() else "cpu"
+model = model.to(device)
+
+# 推理
+prompt = "你好,请介绍一下你自己。"
+inputs = tokenizer(prompt, return_tensors="pt").to(device)
+outputs = model.generate(**inputs, max_new_tokens=100)
+response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+print(response)

+ 0 - 0
黄靖淏/other/code/show_m.ipynb


+ 153 - 0
黄靖淏/other/code/test_all.ipynb

@@ -0,0 +1,153 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "1609c054",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from transformers import AutoModelForCausalLM\n",
+    "model = AutoModelForCausalLM.from_pretrained(\"Qwen/Qwen3-0.6B\", trust_remote_code=True)\n",
+    "print(model)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "d8d3fa97",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from transformers import AutoModelForCausalLM, AutoTokenizer\n",
+    "from peft import PeftModel\n",
+    "model_name = \"Qwen/Qwen3-0.6B\"\n",
+    "\n",
+    "# load the tokenizer and the model\n",
+    "tokenizer = AutoTokenizer.from_pretrained(model_name)\n",
+    "base_model = AutoModelForCausalLM.from_pretrained(\n",
+    "    model_name,\n",
+    "    trust_remote_code=True,\n",
+    "    torch_dtype=\"auto\",\n",
+    "    device_map=\"auto\"\n",
+    ")\n",
+    "\n",
+    "\n",
+    "\n",
+    "# === 加载 PEFT adapter(LoRA 权重) ===\n",
+    "model = PeftModel.from_pretrained(base_model, r'E:\\work_yusys\\gpt_teach\\code\\qwen3_lora_output\\checkpoint-130')\n",
+    "model.eval()\n",
+    "print(model)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "3bccd8f0",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "\n",
+    "def chatFunc(prompt: str):\n",
+    "\n",
+    "    messages = [\n",
+    "        {\"role\": \"system\", \"content\": \"You are a helpful assistant. Determine whether the sentiment entered by the user is positive or negative. Note that only positive or negative cases are output.Avoid being ambiguous pleases.\"},\n",
+    "        {\"role\": \"user\", \"content\": prompt}\n",
+    "    ]\n",
+    "    text = tokenizer.apply_chat_template(\n",
+    "        messages,\n",
+    "        tokenize=False,\n",
+    "        add_generation_prompt=True,\n",
+    "        enable_thinking=True \n",
+    "    )\n",
+    "    model_inputs = tokenizer([text], return_tensors=\"pt\").to(model.device)\n",
+    "    \n",
+    "    generated_ids = model.generate(\n",
+    "        **model_inputs,\n",
+    "        max_new_tokens=32768\n",
+    "    )\n",
+    "    output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() \n",
+    "\n",
+    "\n",
+    "    try:\n",
+    "        \n",
+    "        index = len(output_ids) - output_ids[::-1].index(151668)\n",
+    "    except ValueError:\n",
+    "        index = 0\n",
+    "    content = tokenizer.decode(output_ids[index:], skip_special_tokens=True).strip(\"\\n\")\n",
+    "    print(\"content:\", content)\n",
+    "    return content\n",
+    "    # print(\"thinking content:\", thinking_content)\n",
+    "    "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "5b85e895",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import glob\n",
+    "\n",
+    "positive_txt_files = glob.glob('C:\\\\Users\\\\28191\\\\Desktop\\\\xuexi_py\\\\xuexi_git\\\\ai_learning\\\\data\\\\acllmdb_sentiment_small\\\\positive\\\\*.txt', recursive=True)\n",
+    "negative_txt_files = glob.glob('C:\\\\Users\\\\28191\\\\Desktop\\\\xuexi_py\\\\xuexi_git\\\\ai_learning\\\\data\\\\acllmdb_sentiment_small\\\\negative\\\\*.txt', recursive=True)\n",
+    "res_list = []\n",
+    "fail_txt_list = []\n",
+    "count = 0.0\n",
+    "ca = 0.0\n",
+    "for index, file_path in enumerate(negative_txt_files, start=0):\n",
+    "    print(f\"找到文件: {file_path}\")\n",
+    "    count+=1\n",
+    "    with open(file_path, 'r', encoding='utf-8') as f:\n",
+    "        content = f.read()  # 读取所有内容\n",
+    "        res = chatFunc(content)\n",
+    "        if 'negative' in res or 'Negative' in res:  \n",
+    "            ca+=1\n",
+    "        else:\n",
+    "            fail_txt_list.append(res+content)\n",
+    "        res_list.append(res)\n",
+    "\n",
+    "for index, file_path in enumerate(positive_txt_files, start=0):\n",
+    "    print(f\"找到文件: {file_path}\")\n",
+    "    count+=1\n",
+    "    with open(file_path, 'r', encoding='utf-8') as f:\n",
+    "        content = f.read()  # 读取所有内容\n",
+    "        res = chatFunc(content)\n",
+    "        if 'positive' in res or 'Positive' in res:  \n",
+    "            ca+=1\n",
+    "        else:\n",
+    "            fail_txt_list.append(res+content)\n",
+    "        res_list.append(res)    \n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "a019b295",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "for fail_txt in fail_txt_list:\n",
+    "    print(fail_txt)    "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "c3f7cdb4",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print(\"count::>\" + str(count) + \"  ac:::>\" + str(ca) + \"   accuracy:::>\" + str(ca/count))"
+   ]
+  }
+ ],
+ "metadata": {
+  "language_info": {
+   "name": "python"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}

Datei-Diff unterdrückt, da er zu groß ist
+ 160 - 0
黄靖淏/t1/code/base_text.ipynb


+ 215 - 0
黄靖淏/t1/code/function_calling.ipynb

@@ -0,0 +1,215 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "4d0220be",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from openai import OpenAI\n",
+    "from dotenv import load_dotenv \n",
+    "import json\n",
+    "from pydantic import BaseModel\n",
+    "from openai import BadRequestError\n",
+    "\n",
+    "import os\n",
+    "\n",
+    "load_dotenv()\n",
+    "\n",
+    "client = OpenAI(base_url=\"https://dashscope.aliyuncs.com/compatible-mode/v1\",\n",
+    "       api_key=os.getenv(\"BAILIAN_API_KEY\"))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "3ab0c10b",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "positive_comment_list = []\n",
+    "negative_comment_list = []\n",
+    "\n",
+    "class Emotions(BaseModel):\n",
+    "    emotion : str\n",
+    "    reason : str\n",
+    "\n",
+    "def positive_func(emotion : str, reason : str):\n",
+    "    positive_emotion =  Emotions(emotion = emotion, reason = reason)\n",
+    "    positive_comment_list.append(positive_emotion)\n",
+    "    return positive_emotion\n",
+    "\n",
+    "def negative_func(emotion : str, reason : str):\n",
+    "    negative_emotion =  Emotions(emotion = emotion, reason = reason)\n",
+    "    negative_comment_list.append(negative_emotion)\n",
+    "    return negative_emotion\n",
+    "\n",
+    "\n",
+    "tools = [\n",
+    "    {\n",
+    "        \"type\": \"function\",\n",
+    "        \"function\": {\n",
+    "            \"name\": \"positive_func\",\n",
+    "            \"description\": \"当你判断用户输入的评论情感倾向为“Positive”时调用此函数\",\n",
+    "            \"parameters\":{\n",
+    "                \"type\":\"object\",\n",
+    "                \"properties\":{\n",
+    "                    \"emotion\":{\n",
+    "                        \"type\":\"string\",\n",
+    "                        \"description\":\"此参数为用户输入的评论情感倾向\"\n",
+    "                    },\n",
+    "                    \"reason\":{\n",
+    "                        \"type\":\"string\",\n",
+    "                        \"description\":\"此参数为判断为此情感倾向的原因\"\n",
+    "                    }\n",
+    "                },\n",
+    "                \"required\":[\"emotion\", \"reason\"]\n",
+    "            }\n",
+    "        }\n",
+    "    },\n",
+    "        {\n",
+    "        \"type\": \"function\",\n",
+    "        \"function\": {\n",
+    "            \"name\": \"negative_func\",\n",
+    "            \"description\": \"当你判断用户输入的评论情感倾向为“Negative”时调用此函数\",\n",
+    "            \"parameters\":{\n",
+    "                \"type\":\"object\",\n",
+    "                \"properties\":{\n",
+    "                    \"emotion\":{\n",
+    "                        \"type\":\"string\",\n",
+    "                        \"description\":\"此参数为用户输入的评论情感倾向\"\n",
+    "                    },\n",
+    "                    \"reason\":{\n",
+    "                        \"type\":\"string\",\n",
+    "                        \"description\":\"此参数为判断为此情感倾向的原因\"\n",
+    "                    }\n",
+    "                },\n",
+    "                \"required\":[\"emotion\", \"reason\"]\n",
+    "            }\n",
+    "        }\n",
+    "    },\n",
+    "]\n",
+    "\n",
+    "\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "65268481",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def chat_with_functions(content: str) -> Emotions:\n",
+    "    try:\n",
+    "        completion = client.chat.completions.create(\n",
+    "            model=\"qwen3-4b\",\n",
+    "            messages=[\n",
+    "                {\"role\": \"system\", \"content\": \"You are a helpful assistant. Determine whether the sentiment entered by the user is positive or negative.\"},\n",
+    "                {\"role\": \"user\", \"content\": content},\n",
+    "            ],\n",
+    "            tools=tools,\n",
+    "            tool_choice=\"auto\",\n",
+    "            extra_body={\"enable_thinking\": False},\n",
+    "        )\n",
+    "        response_json = json.loads(completion.model_dump_json())\n",
+    "        \n",
+    "        if response_json['choices'] and 'tool_calls' in response_json['choices'][0][\"finish_reason\"]:\n",
+    "            params = json.loads(response_json['choices'][0][\"message\"][\"tool_calls\"][0][\"function\"][\"arguments\"])\n",
+    "            return Emotions(emotion=params[\"emotion\"], reason=params[\"reason\"])\n",
+    "        else: \n",
+    "            print(\"没用工具\")\n",
+    "            return Emotions(emotion=\"Unknown\", reason=\"No reason provided.\")\n",
+    "    except BadRequestError:\n",
+    "        print(\"文本内容不当:::::>\" + content)\n",
+    "        return Emotions(emotion=\"Error\", reason=\"文本内容不当.\")\n",
+    "    except Exception as e:\n",
+    "        print(f\"Unexpected error: {e}\")\n",
+    "        return Emotions(emotion=\"Error\", reason=\"未知错误\")\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "6e04870e",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import glob\n",
+    "import os\n",
+    "def process_files(file_list, expected_label, res_list, fail_txt_list):\n",
+    "    correct_count = 0\n",
+    "    for file_path in file_list:\n",
+    "        print(f\"找到文件: {file_path}\")\n",
+    "        with open(file_path, 'r', encoding='utf-8') as f:\n",
+    "            content = f.read()\n",
+    "            res = chat_with_functions(content)\n",
+    "            res_list.append(res)\n",
+    "            print(res.emotion)\n",
+    "            if expected_label.lower() in res.emotion.lower():\n",
+    "                correct_count += 1\n",
+    "            else:\n",
+    "                fail_txt_list.append(f\"预测: {res.emotion}\\n原文:\\n{content}\\n原因:\\n{res.reason}\\n\")\n",
+    "    return correct_count\n",
+    "\n",
+    "\n",
+    "base_dir = 'C:\\\\Users\\\\28191\\\\Desktop\\\\xuexi_py\\\\xuexi_git\\\\ai_learning\\\\data\\\\acllmdb_sentiment_small'\n",
+    "positive_txt_files = glob.glob(os.path.join(base_dir, 'positive', '*.txt'), recursive=True)\n",
+    "negative_txt_files = glob.glob(os.path.join(base_dir, 'negative', '*.txt'), recursive=True)\n",
+    "\n",
+    "res_list = []\n",
+    "fail_txt_list = []\n",
+    "total_count = len(positive_txt_files) + len(negative_txt_files)\n",
+    "\n",
+    "correct_positive = process_files(positive_txt_files, 'positive', res_list, fail_txt_list)\n",
+    "correct_negative = process_files(negative_txt_files, 'negative', res_list, fail_txt_list)\n",
+    "correct_total = correct_positive + correct_negative\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "6cdf6ddd",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "\n",
+    "accuracy =correct_total/total_count\n",
+    "print(f\"count::> {total_count}   ac:::> {correct_total}   accuracy:::> {accuracy}\")\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "0537fa27",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import pandas as pd\n",
+    "\n",
+    "fail_data = []\n",
+    "for item in fail_txt_list:\n",
+    "    try:\n",
+    "        parts = item.split('\\n')\n",
+    "        predicted = parts[0].replace(\"预测: \", \"\").strip()\n",
+    "        reason_index = parts.index('原因:') \n",
+    "        original_text = '\\n'.join(parts[2:reason_index])  \n",
+    "        reason = '\\n'.join(parts[reason_index+1:])  \n",
+    "        fail_data.append({\n",
+    "            'predicted_emotion': predicted,\n",
+    "            'original_text': original_text,\n",
+    "            'reason': reason\n",
+    "        })\n",
+    "    except Exception as e:\n",
+    "        print(f\"解析失败: {e}\")\n",
+    "        continue\n",
+    "\n",
+    "df = pd.DataFrame(fail_data)\n",
+    "\n",
+    "df.to_csv('failed_predictions.csv', index=False, encoding='utf-8-sig')"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "qwen-env",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "name": "python",
+   "version": "3.13.1"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}

+ 148 - 0
黄靖淏/t1/code/function_calling_test.ipynb

@@ -0,0 +1,148 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "8779ae67",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from openai import OpenAI\n",
+    "from dotenv import load_dotenv \n",
+    "import json\n",
+    "from pydantic import BaseModel\n",
+    "from openai import BadRequestError\n",
+    "import os\n",
+    "\n",
+    "client = OpenAI(base_url=\"https://dashscope.aliyuncs.com/compatible-mode/v1\",\n",
+    "       api_key=os.getenv(\"BAILIAN_API_KEY\"))\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 40,
+   "id": "a28daf57",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def calling_test(param1 : float, param2 : float):\n",
+    "    print(\"success\")\n",
+    "    print(param1 * param2)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 41,
+   "id": "96fa1b69",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "{\"id\":\"chatcmpl-d0b30cda-ee7c-9b0b-b592-c8cbc25944a7\",\"choices\":[{\"finish_reason\":\"tool_calls\",\"index\":0,\"logprobs\":null,\"message\":{\"content\":\"\",\"refusal\":null,\"role\":\"assistant\",\"annotations\":null,\"audio\":null,\"function_call\":null,\"tool_calls\":[{\"id\":\"call_ee0d6dfe3cd945e4b76e29\",\"function\":{\"arguments\":\"{\\\"param1\\\": \\\"1651\\\", \\\"param2\\\": \\\"74515\\\"}\",\"name\":\"calling_test\"},\"type\":\"function\",\"index\":0}],\"reasoning_content\":\"\"}}],\"created\":1752051153,\"model\":\"qwen3-4b\",\"object\":\"chat.completion\",\"service_tier\":null,\"system_fingerprint\":null,\"usage\":{\"completion_tokens\":34,\"prompt_tokens\":203,\"total_tokens\":237,\"completion_tokens_details\":null,\"prompt_tokens_details\":null}}\n"
+     ]
+    }
+   ],
+   "source": [
+    "tools = [\n",
+    "    {\n",
+    "        \"type\": \"function\",\n",
+    "        \"function\": {\n",
+    "            \"name\": \"calling_test\",\n",
+    "            \"description\": \"调用函数测试用例,作用是打印 两数相乘的结果 \",\n",
+    "            \"parameters\":{\n",
+    "                \"type\": \"object\",\n",
+    "                \"properties\": {\n",
+    "                    \"param1\":{\n",
+    "                        \"type\":\"string\",\n",
+    "                        \"description\":\"The test param1.\"\n",
+    "                        },\n",
+    "                    \"param2\":{\n",
+    "                        \"type\":\"string\",\n",
+    "                        \"description\":\"The test param2.\"\n",
+    "                        },\n",
+    "                },\n",
+    "                \"required\": [\"param1\", \"param2\"]\n",
+    "            }\n",
+    "        }\n",
+    "    },\n",
+    "]\n",
+    "\n",
+    "response = client.chat.completions.create(\n",
+    "    model=\"qwen3-4b\",\n",
+    "    messages=[{\"role\": \"user\", \"content\": \"将1651和74515相乘,打印结果\"}],\n",
+    "    tools=tools,\n",
+    "    extra_body={\"enable_thinking\": False},\n",
+    "    tool_choice=\"auto\",\n",
+    ")\n",
+    "\n",
+    "print(response.model_dump_json())\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "70a4e1e4",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "success\n",
+      "123024265.0\n"
+     ]
+    }
+   ],
+   "source": [
+    "response_json = json.loads(response.model_dump_json())\n",
+    "if response_json['choices'] and 'tool_calls' in response_json['choices'][0][\"finish_reason\"]:\n",
+    "    params = json.loads(response_json['choices'][0][\"message\"][\"tool_calls\"][0][\"function\"][\"arguments\"])\n",
+    "    calling_test(float(params[\"param1\"]), float(params[\"param2\"]))\n",
+    "else:\n",
+    "    print(\"No tool calls found in the response.\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "id": "9cc7f101",
+   "metadata": {},
+   "outputs": [
+    {
+     "ename": "ModuleNotFoundError",
+     "evalue": "No module named 'modelscope'",
+     "output_type": "error",
+     "traceback": [
+      "\u001b[31m---------------------------------------------------------------------------\u001b[39m",
+      "\u001b[31mModuleNotFoundError\u001b[39m                       Traceback (most recent call last)",
+      "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[2]\u001b[39m\u001b[32m, line 1\u001b[39m\n\u001b[32m----> \u001b[39m\u001b[32m1\u001b[39m \u001b[38;5;28;01mfrom\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34;01mmodelscope\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;28;01mimport\u001b[39;00m snapshot_download\n\u001b[32m      2\u001b[39m model_dir = snapshot_download(\u001b[33m'\u001b[39m\u001b[33mQwen/Qwen3-0.6B\u001b[39m\u001b[33m'\u001b[39m,cache_dir=\u001b[33m\"\u001b[39m\u001b[33mE:\u001b[39m\u001b[33m\\\u001b[39m\u001b[33mwork_yusys\u001b[39m\u001b[33m\"\u001b[39m)\n",
+      "\u001b[31mModuleNotFoundError\u001b[39m: No module named 'modelscope'"
+     ]
+    }
+   ],
+   "source": [
+    "from modelscope import snapshot_download\n",
+    "model_dir = snapshot_download('Qwen/Qwen3-0.6B', cache_dir=r\"E:\\work_yusys\")\n"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "ai-learning",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.11.13"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}

Datei-Diff unterdrückt, da er zu groß ist
+ 144 - 0
黄靖淏/t1/code/structured_output_test.ipynb


+ 0 - 0
黄靖淏/t2/code/t2.ipynb


Einige Dateien werden nicht angezeigt, da zu viele Dateien in diesem Diff geändert wurden.