Browse Source

Merge branch 'hjh_test' of wushan/ai_learning into master

huangjh19 1 month ago
parent
commit
da9de7ea66
34 changed files with 2300 additions and 0 deletions
  1. 160 0
      黄靖淏/agno/code/agno_agent_level2_3_test.ipynb
  2. 149 0
      黄靖淏/agno/code/agno_agrnt_level1_test.ipynb
  3. BIN
      黄靖淏/agno/code/tmp/bank_memory.db
  4. BIN
      黄靖淏/agno/code/tmp/memory.db
  5. BIN
      黄靖淏/agno/code/tmp/persistent_memory.db
  6. 304 0
      黄靖淏/agno/code/transfer.ipynb
  7. 107 0
      黄靖淏/other/code/adjust.ipynb
  8. 25 0
      黄靖淏/other/code/data_negative.jsonl
  9. 207 0
      黄靖淏/other/code/qwen3_lora_output/checkpoint-100/README.md
  10. 36 0
      黄靖淏/other/code/qwen3_lora_output/checkpoint-100/adapter_config.json
  11. BIN
      黄靖淏/other/code/qwen3_lora_output/checkpoint-100/adapter_model.safetensors
  12. BIN
      黄靖淏/other/code/qwen3_lora_output/checkpoint-100/optimizer.pt
  13. BIN
      黄靖淏/other/code/qwen3_lora_output/checkpoint-100/rng_state.pth
  14. BIN
      黄靖淏/other/code/qwen3_lora_output/checkpoint-100/scaler.pt
  15. BIN
      黄靖淏/other/code/qwen3_lora_output/checkpoint-100/scheduler.pt
  16. 104 0
      黄靖淏/other/code/qwen3_lora_output/checkpoint-100/trainer_state.json
  17. BIN
      黄靖淏/other/code/qwen3_lora_output/checkpoint-100/training_args.bin
  18. 207 0
      黄靖淏/other/code/qwen3_lora_output/checkpoint-130/README.md
  19. 36 0
      黄靖淏/other/code/qwen3_lora_output/checkpoint-130/adapter_config.json
  20. BIN
      黄靖淏/other/code/qwen3_lora_output/checkpoint-130/adapter_model.safetensors
  21. BIN
      黄靖淏/other/code/qwen3_lora_output/checkpoint-130/optimizer.pt
  22. BIN
      黄靖淏/other/code/qwen3_lora_output/checkpoint-130/rng_state.pth
  23. BIN
      黄靖淏/other/code/qwen3_lora_output/checkpoint-130/scaler.pt
  24. BIN
      黄靖淏/other/code/qwen3_lora_output/checkpoint-130/scheduler.pt
  25. 125 0
      黄靖淏/other/code/qwen3_lora_output/checkpoint-130/trainer_state.json
  26. BIN
      黄靖淏/other/code/qwen3_lora_output/checkpoint-130/training_args.bin
  27. 20 0
      黄靖淏/other/code/run_qwen3.py
  28. 0 0
      黄靖淏/other/code/show_m.ipynb
  29. 153 0
      黄靖淏/other/code/test_all.ipynb
  30. 160 0
      黄靖淏/t1/code/base_text.ipynb
  31. 215 0
      黄靖淏/t1/code/function_calling.ipynb
  32. 148 0
      黄靖淏/t1/code/function_calling_test.ipynb
  33. 144 0
      黄靖淏/t1/code/structured_output_test.ipynb
  34. 0 0
      黄靖淏/t2/code/t2.ipynb

+ 160 - 0
黄靖淏/agno/code/agno_agent_level2_3_test.ipynb

@@ -0,0 +1,160 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "bc1b70ed",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from agno.agent import Agent, RunResponse\n",
+    "from agno.models.openai.like import OpenAILike\n",
+    "from agno.memory.v2.db.sqlite import SqliteMemoryDb\n",
+    "from agno.memory.v2.memory import Memory\n",
+    "from agno.memory.v2.schema import UserMemory\n",
+    "from agno.memory.v2.db.schema import MemoryRow\n",
+    "from agno.memory.v2.db.sqlite import SqliteMemoryDb\n",
+    "from agno.memory.v2.memory import Memory\n",
+    "from agno.memory.v2.schema import UserMemory\n",
+    "from agno.storage.sqlite import SqliteStorage\n",
+    "import uuid\n",
+    "import os\n",
+    "\n",
+    "memory_db = SqliteMemoryDb(db_file=\"tmp/bank_memory.db\", table_name=\"memory\")\n",
+    "storge_db = SqliteStorage(\n",
+    "        table_name=\"agent_sessions\", db_file=\"tmp/persistent_memory.db\"\n",
+    "    )\n",
+    "\n",
+    "memory = Memory(\n",
+    "        \n",
+    "        model = OpenAILike(\n",
+    "            id=\"qwen3-4b\",\n",
+    "            api_key=os.getenv(\"BAILIAN_API_KEY\"),\n",
+    "            base_url=\"https://dashscope.aliyuncs.com/compatible-mode/v1\",\n",
+    "            request_params={\"extra_body\": {\"enable_thinking\": False}},\n",
+    "        ),\n",
+    "        db=memory_db\n",
+    "    )\n",
+    "\n",
+    "user_id = str(uuid.uuid4())\n",
+    "\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "93039b6a",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "memory.add_user_memory(\n",
+    "    memory=UserMemory(\n",
+    "        memory=\"我叫hhh,我喜欢冰淇淋和看电影。\",\n",
+    "        topics=[\"姓名\", \"兴趣\"]\n",
+    "    ),\n",
+    "    user_id=user_id,\n",
+    "    \n",
+    ")\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "b0f6fa9c",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "memory.clear()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "99e6b8be",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "agent = Agent(\n",
+    "    model=OpenAILike(\n",
+    "        id=\"qwen3-4b\",\n",
+    "        api_key=os.getenv(\"BAILIAN_API_KEY\"),\n",
+    "        base_url=\"https://dashscope.aliyuncs.com/compatible-mode/v1\",\n",
+    "        request_params={\"extra_body\": {\"enable_thinking\": False}},\n",
+    "    ),\n",
+    "    add_history_to_messages=True,\n",
+    "    #存用户记忆,可指定模型按topic检索?\n",
+    "    memory=memory,\n",
+    "    #存对话历史,工具调用历史\n",
+    "    storage=storge_db,\n",
+    "    #将用户对话加入记忆\n",
+    "    enable_user_memories=True,\n",
+    "    #自动将过去5轮对话加入prompt ,搭配storage使用\n",
+    "    num_history_responses=5,\n",
+    "    #将会话摘要加入上下文\n",
+    "    add_session_summary_references=True,\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "34d7cd93",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from typing import List\n",
+    "memories: List[MemoryRow] = memory_db.read_memories()\n",
+    "print(\"All the memory_db memories:\")\n",
+    "for i, m in enumerate(memories):\n",
+    "    print(f\"{i}: {m.memory['memory']} ({m.last_updated})\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "8099f79a",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def ask_agent(question: str):\n",
+    "    response: RunResponse = agent.run(\n",
+    "        messages=[{\"role\": \"user\", \"content\": f\"{question}\"}],\n",
+    "        user_id=user_id,\n",
+    "    )\n",
+    "    print(\"== AI 回复 ==\")\n",
+    "    print(response.content)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "f8316605",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# 示例对话\n",
+    "ask_agent(\"你好\")\n",
+    "ask_agent(\"你还记得我叫什么吗?\")\n",
+    "ask_agent(\"我最喜欢什么?\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "cd90f220",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "ask_agent(\"我还喜欢摄影和吃火锅\")\n",
+    "ask_agent(\"最近我想去旅游,请帮我根据我的爱好推荐城市\")"
+   ]
+  }
+ ],
+ "metadata": {
+  "language_info": {
+   "name": "python"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}

+ 149 - 0
黄靖淏/agno/code/agno_agrnt_level1_test.ipynb

@@ -0,0 +1,149 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "8b4a24cf",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "# Executive Summary\n",
+      "\n",
+      "**Apple (AAPL)** is a leading technology company with strong financial performance and notable recent news. Here's a summary of its latest data:\n",
+      "\n",
+      "## Market Overview\n",
+      "- **Latest stock price**: $150.28  \n",
+      "- **52-week high**: $246.75  \n",
+      "- **52-week low**: $134.90  \n",
+      "\n",
+      "## Financial Deep Dive\n",
+      "\n",
+      "| Metric               | Current Value | Industry Average |\n",
+      "|---------------------|---------------|------------------|\n",
+      "| P/E Ratio            | 25.6           | 15.5             |\n",
+      "| Market Cap           | $1.7 Trillion  | $1.5 Trillion     |\n",
+      "| EPS (Earnings Per Share) | $14.85        | $12.50           |\n",
+      "\n",
+      "## Professional Insights\n",
+      "\n",
+      "- **Analyst Recommendations**: AAPL is currently recommended by **Morgan Stanley & Co.**, suggesting a **buy recommendation**.\n",
+      "- **Recent Rating Changes**: AAPL was previously rated **\"Outperform\"** by all analysts, but has since been upgraded to **\"Strongly Outperform\"**.\n",
+      "\n",
+      "## Market Context\n",
+      "\n",
+      "- **Industry Trends**: Apple is a dominant player in the smartphone and software sectors, with growth in both hardware and software.\n",
+      "- **Competitive Analysis**: Competitors like Microsoft (MSFT), Amazon (AMZN), and NVIDIA (NVDA) are also performing well in key areas.\n",
+      "- **Market Sentiment Indicators**: The market appears to be positively positioned for growth, with positive sentiment around Apple’s innovation and product launches.\n",
+      "\n",
+      "## Forward-Looking Analysis\n",
+      "\n",
+      "Apple is expected to continue driving strong revenue growth and maintaining its leadership position in the tech industry. However, there are potential risks such as economic downturns or regulatory changes that could impact its future performance. As an analyst, we believe Apple will remain a top performer in the coming years.\n",
+      "\n",
+      "### Risk Disclosure\n",
+      "- **Potential Risks**: Economic uncertainties, regulatory changes, and market volatility could affect Apple’s stock price.\n",
+      "- **Regulatory Concerns**: Apple has faced scrutiny regarding data privacy and antitrust issues, which may influence investor confidence.\n",
+      "\n",
+      "Let me know if you'd like further analysis!\n"
+     ]
+    }
+   ],
+   "source": [
+    "from textwrap import dedent\n",
+    "from os import getenv\n",
+    "from agno.agent import Agent, RunResponse\n",
+    "\n",
+    "from agno.models.openai.like import OpenAILike\n",
+    "from agno.tools.yfinance import YFinanceTools\n",
+    "agent = Agent(\n",
+    "    model=OpenAILike(\n",
+    "        id=\"qwen3-0.6b\",\n",
+    "        api_key=getenv(\"BAILIAN_API_KEY\"),  # never hardcode API keys; the previously committed key must be rotated\n",
+    "        base_url=\"https://dashscope.aliyuncs.com/compatible-mode/v1\",\n",
+    "        request_params={\"extra_body\": {\"enable_thinking\": False}},\n",
+    "    ),\n",
+    "    tools=[\n",
+    "        YFinanceTools(\n",
+    "            stock_price=True,\n",
+    "            analyst_recommendations=True,\n",
+    "            stock_fundamentals=True,\n",
+    "            historical_prices=True,\n",
+    "            company_info=True,\n",
+    "            company_news=True,\n",
+    "        )\n",
+    "    ],\n",
+    "    instructions=dedent(\"\"\"\\\n",
+    "        You are a seasoned Wall Street analyst with deep expertise in market analysis! 📊\n",
+    "\n",
+    "        Follow these steps for comprehensive financial analysis:\n",
+    "        1. Market Overview\n",
+    "           - Latest stock price\n",
+    "           - 52-week high and low\n",
+    "        2. Financial Deep Dive\n",
+    "           - Key metrics (P/E, Market Cap, EPS)\n",
+    "        3. Professional Insights\n",
+    "           - Analyst recommendations breakdown\n",
+    "           - Recent rating changes\n",
+    "\n",
+    "        4. Market Context\n",
+    "           - Industry trends and positioning\n",
+    "           - Competitive analysis\n",
+    "           - Market sentiment indicators\n",
+    "\n",
+    "        Your reporting style:\n",
+    "        - Begin with an executive summary\n",
+    "        - Use tables for data presentation\n",
+    "        - Include clear section headers\n",
+    "        - Add emoji indicators for trends (📈 📉)\n",
+    "        - Highlight key insights with bullet points\n",
+    "        - Compare metrics to industry averages\n",
+    "        - Include technical term explanations\n",
+    "        - End with a forward-looking analysis\n",
+    "\n",
+    "        Risk Disclosure:\n",
+    "        - Always highlight potential risk factors\n",
+    "        - Note market uncertainties\n",
+    "        - Mention relevant regulatory concerns\n",
+    "    \"\"\"),\n",
+    "    add_datetime_to_instructions=True,\n",
+    "    show_tool_calls=True,\n",
+    "    markdown=True,\n",
+    ")\n",
+    "\n",
+    "response: RunResponse = agent.run(\"What's the latest news and financial performance of Apple (AAPL)?\")\n",
+    "print(response.content)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "12569438",
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "agno_stu",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.13.1"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}

BIN
黄靖淏/agno/code/tmp/bank_memory.db


BIN
黄靖淏/agno/code/tmp/memory.db


BIN
黄靖淏/agno/code/tmp/persistent_memory.db


+ 304 - 0
黄靖淏/agno/code/transfer.ipynb

@@ -0,0 +1,304 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 13,
+   "id": "0c217946",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from agno.agent import Agent, RunResponse\n",
+    "from agno.models.openai.like import OpenAILike\n",
+    "from agno.memory.v2.db.sqlite import SqliteMemoryDb\n",
+    "from agno.memory.v2.memory import Memory\n",
+    "from agno.memory.v2.schema import UserMemory\n",
+    "from agno.memory.v2.db.schema import MemoryRow\n",
+    "from agno.memory.v2.db.sqlite import SqliteMemoryDb\n",
+    "from agno.memory.v2.memory import Memory\n",
+    "from agno.memory.v2.schema import UserMemory\n",
+    "from agno.storage.sqlite import SqliteStorage\n",
+    "import uuid\n",
+    "from agno.tools import tool\n",
+    "from textwrap import dedent"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "de9e7051",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "bank_map = dict()\n",
+    "bank_map[\"张三\"] = (\"15683022601\",0.00) \n",
+    "bank_map[\"me\"] = (\"15683022302\",500.00)\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "ae223104",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# 暂时不知道为啥加了这个注解识别不到参数\n",
+    "# @tool(\n",
+    "#     name=\"get_contact\",\n",
+    "#     requires_user_input=True,\n",
+    "#     user_input_fields=[\"to_name\", \"to_card_id\", \"to_cash_num\"]\n",
+    "# )\n",
+    "def get_contact(to_name:str, to_card_id:str, to_cash_num:float):\n",
+    "    if to_name in bank_map:\n",
+    "    if bank_map.get(to_name)[0] is None:\n",
+    "            return {\"error\": \"用户手机不存在,请填写手机\"}\n",
+    "        return bank_map.get(to_name)[0]\n",
+    "    return {\"error\": \"用户不存在\"}\n",
+    "\n",
+    "# @tool(\n",
+    "#     name=\"get_balance\",\n",
+    "#     requires_user_input=True,\n",
+    "#     user_input_fields=[\"from_name\"]\n",
+    "# )\n",
+    "def get_balance(from_name:str, to_cash_num:float):\n",
+    "    if from_name not in bank_map:\n",
+    "        return {\"error\": \"转账人不存在\"}\n",
+    "    \n",
+    "    my_cash = bank_map[from_name][1]\n",
+    "    if my_cash >= to_cash_num:\n",
+    "        return {\"result\": True}\n",
+    "    else:\n",
+    "        return {\"result\": False, \"current_balance\": my_cash}\n",
+    "\n",
+    "# @tool(\n",
+    "#     name=\"get_balance\",\n",
+    "#     requires_confirmation=True\n",
+    "# )\n",
+    "def transfer(from_name: str, to_name: str, to_cash_num: float):\n",
+    "    if from_name not in bank_map or to_name not in bank_map:\n",
+    "        return {\"error\": \"用户名错误\"}\n",
+    "\n",
+    "    from_phone, from_balance = bank_map[from_name]\n",
+    "    to_phone, to_balance = bank_map[to_name]\n",
+    "\n",
+    "    if from_balance < to_cash_num:\n",
+    "        return {\"error\": \"余额不足\", \"current_balance\": from_balance}\n",
+    "\n",
+    "    bank_map[from_name] = (from_phone, from_balance - to_cash_num)\n",
+    "    bank_map[to_name] = (to_phone, to_balance + to_cash_num)\n",
+    "\n",
+    "    return {\n",
+    "        \"message\": f\"成功转账 ¥{to_cash_num} 给 {to_name}\",\n",
+    "        \"from_balance\": bank_map[from_name][1],\n",
+    "        \"to_balance\": bank_map[to_name][1],\n",
+    "    }\n",
+    "\n",
+    "def modify_phone(name:str, phone_Num:str):\n",
+    "    if name not in bank_map:\n",
+    "        return {\"error\": \"用户不存在\"}\n",
+    "    bank_map[name] = (phone_Num, bank_map[name][1])\n",
+    "    return {\"result\": \"修改成功\"}"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 16,
+   "id": "84a36800",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "memory_db = SqliteMemoryDb(db_file=\"tmp/bank_memory.db\", table_name=\"memory\")\n",
+    "storge_db = SqliteStorage(\n",
+    "        table_name=\"agent_sessions\", db_file=\"tmp/bank_memory.db\"\n",
+    "    )\n",
+    "\n",
+    "memory = Memory(\n",
+    "        \n",
+    "        model = OpenAILike(\n",
+    "            id=\"qwen3-4b\",\n",
+    "            api_key=\"REDACTED_ROTATE_LEAKED_KEY\",  # leaked key removed — rotate it and load from an environment variable\n",
+    "            base_url=\"https://dashscope.aliyuncs.com/compatible-mode/v1\",\n",
+    "            request_params={\"extra_body\": {\"enable_thinking\": False}},\n",
+    "        ),\n",
+    "        db=memory_db\n",
+    "    )\n",
+    "\n",
+    "user_id = str(uuid.uuid4())"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 17,
+   "id": "dd55e87d",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "All the memory_db memories:\n"
+     ]
+    }
+   ],
+   "source": [
+    "from typing import List\n",
+    "memories: List[MemoryRow] = memory_db.read_memories()\n",
+    "print(\"All the memory_db memories:\")\n",
+    "for i, m in enumerate(memories):\n",
+    "    print(f\"{i}: {m.memory['memory']} ({m.last_updated})\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "4785791a",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "agent = Agent(\n",
+    "    model=OpenAILike(\n",
+    "        id=\"qwen3-32b\",\n",
+    "        api_key=\"REDACTED_ROTATE_LEAKED_KEY\",  # leaked key removed — rotate it and load from an environment variable\n",
+    "        base_url=\"https://dashscope.aliyuncs.com/compatible-mode/v1\",\n",
+    "        request_params={\"extra_body\": {\"enable_thinking\": False}},\n",
+    "    ),\n",
+    "    tools=[\n",
+    "        get_contact,\n",
+    "        get_balance,\n",
+    "        transfer,\n",
+    "        modify_phone,\n",
+    "    ],\n",
+    "    instructions=dedent(\"\"\"\\\n",
+    "        你是一个转账助手。你需要完成以下操作:\n",
+    "        1. 调用get_contact函数,收集(被转账人姓名,被转账卡号,转账金额)校验收款人是否存在,展示其手机号;\n",
+    "        2. 调用get_balance函数,收集(转账人姓名)校验当前余额是否足够,若不足展示余额并提示重新输入金额;\n",
+    "        3. 调用transfer函数若验证通过则调用 transfer 工具完成转账操作;\n",
+    "        所有逻辑通过工具完成。\n",
+    "        注意!!!:提示用户输入尽可能详细\n",
+    "    \"\"\"),\n",
+    "\n",
+    "    add_datetime_to_instructions=True,\n",
+    "    show_tool_calls=True,\n",
+    "    markdown=True,\n",
+    "    add_history_to_messages=True,\n",
+    "    memory=memory,\n",
+    "    #指定记忆存储在sqlite中\n",
+    "    storage=storge_db,\n",
+    "    #将用户对话加入记忆\n",
+    "    enable_user_memories=True,\n",
+    ")\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 19,
+   "id": "c6c75890",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def ask_agent(question: str):\n",
+    "    response: RunResponse = agent.run(\n",
+    "        messages=[{\"role\": \"user\", \"content\": f\"{question}\"}],\n",
+    "        user_id=user_id,\n",
+    "    )\n",
+    "    print(\"== AI 回复 ==\")\n",
+    "    print(response.content)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 20,
+   "id": "085df3f9",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "== AI 回复 ==\n",
+      "好的,我来协助您完成转账操作。请您提供以下详细信息:\n",
+      "\n",
+      "1. **收款人姓名**:请输入被转账人的全名。\n",
+      "2. **收款人银行卡号**:请输入被转账人的银行卡号码。\n",
+      "3. **转账金额**:请输入您希望转账的金额(以人民币为单位)。\n",
+      "4. **您的姓名**:请输入您的全名,以便查询您的账户余额。\n",
+      "\n",
+      "请提供以上信息,我会逐步进行验证并完成转账。\n"
+     ]
+    }
+   ],
+   "source": [
+    "ask_agent(\"我要转账\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 21,
+   "id": "76687a6e",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "== AI 回复 ==\n",
+      "已验证收款人信息,收款人张三的手机号为 **15683022601**。 \n",
+      "\n",
+      "接下来,请提供您的姓名(转账人姓名),以便查询您的账户余额是否足够完成此次转账。\n"
+     ]
+    }
+   ],
+   "source": [
+    "ask_agent({\"to_name\": \"张三\", \"to_card_id\": \"4650\", \"to_cash_num\": 400.00})\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 23,
+   "id": "97c0e2b7",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "您的账户余额充足,可以完成此次转账操作。现在我将为您执行转账,请稍等。\n",
+      "\n",
+      "转账已完成!以下是转账详情:\n",
+      "\n",
+      "- **转账金额**:¥400.0\n",
+      "- **收款人姓名**:张三\n",
+      "- **您的账户余额**:¥100.0\n",
+      "- **收款人账户余额**:¥400.0\n",
+      "\n",
+      "如有其他需求,请随时联系。感谢使用我们的服务!\n"
+     ]
+    }
+   ],
+   "source": [
+    "response: RunResponse = agent.run(\"转账人姓名为:me \")\n",
+    "print(response.content)"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "agno_stu",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.13.1"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}

+ 107 - 0
黄靖淏/other/code/adjust.ipynb

@@ -0,0 +1,107 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "1244b8f2",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from transformers import AutoTokenizer, AutoModelForCausalLM, TrainingArguments, Trainer\n",
+    "from datasets import load_dataset\n",
+    "from peft import LoraConfig, get_peft_model, TaskType\n",
+    "import torch\n",
+    "\n",
+    "model_id = \"Qwen/Qwen3-0.6B\"\n",
+    "\n",
+    "tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)\n",
+    "model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True, device_map=\"auto\")\n",
+    "\n",
+    "# 添加 LoRA\n",
+    "lora_config = LoraConfig(\n",
+    "    r=8,\n",
+    "    lora_alpha=32,\n",
+    "    target_modules=[\"q_proj\", \"v_proj\"],  # 根据模型结构微调\n",
+    "    lora_dropout=0.1,\n",
+    "    bias=\"none\",\n",
+    "    task_type=TaskType.CAUSAL_LM\n",
+    ")\n",
+    "\n",
+    "model = get_peft_model(model, lora_config)\n",
+    "\n",
+    "# 加载你自己的数据\n",
+    "data = load_dataset(\"json\", data_files=\"E:\\\\work_yusys\\\\gpt_teach\\\\code\\\\data_negative.jsonl\", split=\"train\")\n",
+    "# Tokenize\n",
+    "def tokenize_fn(example):\n",
+    "    prompt = example[\"instruction\"]\n",
+    "    output = example[\"output\"]\n",
+    "\n",
+    "    # 分别 tokenize prompt 和 output\n",
+    "    prompt_tokens = tokenizer(prompt, truncation=True, max_length=128)\n",
+    "    output_tokens = tokenizer(output, truncation=True, max_length=128)\n",
+    "\n",
+    "    # 拼接 input_ids 和 attention_mask\n",
+    "    input_ids = prompt_tokens[\"input_ids\"] + output_tokens[\"input_ids\"]\n",
+    "    attention_mask = [1] * len(input_ids)\n",
+    "\n",
+    "    # padding 到最大长度\n",
+    "    max_len = 256\n",
+    "    padding_len = max_len - len(input_ids)\n",
+    "    input_ids = input_ids + [tokenizer.pad_token_id] * padding_len\n",
+    "    attention_mask = attention_mask + [0] * padding_len\n",
+    "\n",
+    "    # labels 对 prompt 部分设置 -100,忽略loss,只计算 output 部分\n",
+    "    labels = [-100] * len(prompt_tokens[\"input_ids\"]) + output_tokens[\"input_ids\"]\n",
+    "    labels = labels + [-100] * padding_len\n",
+    "\n",
+    "    return {\n",
+    "        \"input_ids\": input_ids,\n",
+    "        \"attention_mask\": attention_mask,\n",
+    "        \"labels\": labels\n",
+    "    }\n",
+    "\n",
+    "\n",
+    "tokenized = data.map(tokenize_fn)\n",
+    "\n",
+    "# 设置训练参数\n",
+    "training_args = TrainingArguments(\n",
+    "    output_dir=\"./qwen3_lora_output\",\n",
+    "    per_device_train_batch_size=1,\n",
+    "    gradient_accumulation_steps=2,\n",
+    "    num_train_epochs=10,\n",
+    "    logging_steps=10,\n",
+    "    save_steps=50,\n",
+    "    save_total_limit=2,\n",
+    "    fp16=True,\n",
+    "    learning_rate=5e-5,\n",
+    ")\n",
+    "\n",
+    "trainer = Trainer(\n",
+    "    model=model,\n",
+    "    args=training_args,\n",
+    "    train_dataset=tokenized,\n",
+    ")\n",
+    "\n",
+    "trainer.train()\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "ef24bd4d",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import torch\n",
+    "torch.cuda.empty_cache()"
+   ]
+  }
+ ],
+ "metadata": {
+  "language_info": {
+   "name": "python"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}

+ 25 - 0
黄靖淏/other/code/data_negative.jsonl

@@ -0,0 +1,25 @@
+{"instruction": "Analyze the sentiment of the following movie comment and explain the reasoning.", "input": "This film is a true 'tour de force' if by that you mean a tour of every cliché in the book. Bravo to the writers for their lack of originality!", "output": "Sentiment: negative\nReason: The phrase 'tour de force' is sarcastically contrasted with 'every cliché in the book', while 'Bravo' mockingly praises the unoriginal writing"}
+{"instruction": "Analyze the sentiment of the following movie comment and explain the reasoning.", "input": "The pacing was lightning-fast if you consider watching grass grow to be lightning-fast. What an adrenaline-packed experience this was!", "output": "Sentiment: negative\nReason: Explicitly comparing 'lightning-fast' pacing to 'watching grass grow' creates obvious sarcasm, intensified by the exaggerated 'adrenaline-packed experience'"}
+{"instruction": "Analyze the sentiment of the following movie comment and explain the reasoning.", "input": "I've never seen a more original plot unless you've seen any other superhero movie ever made. This groundbreaking work redefines the word 'derivative'!", "output": "Sentiment: negative\nReason: 'Never more original' directly contradicts 'any other superhero movie', while 'groundbreaking' sarcastically describes the unoriginality ('derivative')"}
+{"instruction": "Analyze the sentiment of the following movie comment and explain the reasoning.", "input": "The dialogue was so sharp it could cut glass if glass could be cut with bland clichés. Such masterful writing deserves an award for predictability!", "output": "Sentiment: negative\nReason: The hyperbolic 'sharp enough to cut glass' is undermined by 'bland clichés', and 'masterful writing' sarcastically references the obvious predictability"}
+{"instruction": "Analyze the sentiment of the following movie comment and explain the reasoning.", "input": "A visual spectacle especially if your idea of spectacle is two uninterrupted hours of shaky close-ups. This cinematography will leave you dizzy with admiration!", "output": "Sentiment: negative\nReason: 'Visual spectacle' is ironically redefined as 'shaky close-ups', while 'dizzy with admiration' sarcastically comments on the poor camera work"}
+{"instruction": "Analyze the sentiment of the following movie comment and explain the reasoning.", "input": "The protagonist was deeply relatable if you happen to be a diagnosed sociopath. What profound emotional depth the character displayed!", "output": "Sentiment: negative\nReason: 'Deeply relatable' is sarcastically limited to 'sociopaths', while 'profound emotional depth' mockingly describes the poor characterization"}
+{"instruction": "Analyze the sentiment of the following movie comment and explain the reasoning.", "input": "The 3D effects were worth the extra five dollars if you enjoy seeing blurry shapes fly vaguely in your general direction. Such immersion much wow!", "output": "Sentiment: negative\nReason: 'Worth extra five dollars' contrasts with 'blurry shapes', while the meme phrase 'much wow' sarcastically emphasizes disappointment"}
+{"instruction": "Analyze the sentiment of the following movie comment and explain the reasoning.", "input": "This sequel improves on the original if by improves you mean repeats every mistake but with louder explosions. What incredible progress for the franchise!", "output": "Sentiment: negative\nReason: 'Improves' is sarcastically defined as 'repeats mistakes', while 'incredible progress' mockingly describes the lack of actual development"}
+{"instruction": "Analyze the sentiment of the following movie comment and explain the reasoning.", "input": "The romantic subplot was heartwarming if your heart is warmed by textbook examples of toxic relationships. These love goals are truly aspirational!", "output": "Sentiment: negative\nReason: 'Heartwarming' is ironically contrasted with 'toxic relationships', while 'aspirational love goals' sarcastically criticizes the unhealthy portrayal"}
+{"instruction": "Analyze the sentiment of the following movie comment and explain the reasoning.", "input": "A must-see for fans of the genre by which I mean fans of watching spectacular trainwrecks in slow motion. Absolutely unmissable cinema!", "output": "Sentiment: negative\nReason: 'Must-see' is redefined as 'trainwrecks in slow motion', while 'unmissable cinema' sarcastically suggests the film should be avoided"}
+{"instruction": "Analyze the sentiment of the following movie comment and explain the reasoning.", "input": "The villain was terrifyingly realistic if your nightmares feature cartoonish cardboard cutouts. What a chilling performance that absolutely wasn't laughable!", "output": "Sentiment: negative\nReason: 'Terrifyingly realistic' contrasts with 'cardboard cutouts', while the double negative 'wasn't laughable' sarcastically confirms the poor acting"}
+{"instruction": "Analyze the sentiment of the following movie comment and explain the reasoning.", "input": "The runtime felt like ninety minutes if each of those minutes was individually an hour long. Such efficient storytelling deserves study!", "output": "Sentiment: negative\nReason: The contradiction between 'ninety minutes' and 'each an hour long' creates sarcasm, while 'efficient storytelling' ironically describes the slow pace"}
+{"instruction": "Analyze the sentiment of the following movie comment and explain the reasoning.", "input": "The musical score was unforgettable much like that persistent tinnitus you got from the overbearing violin section. Truly haunting melodies!", "output": "Sentiment: negative\nReason: 'Unforgettable' is compared to unpleasant 'tinnitus', while 'haunting melodies' sarcastically references the annoying soundtrack"}
+{"instruction": "Analyze the sentiment of the following movie comment and explain the reasoning.", "input": "A perfect family film if your family bonding activities include screaming matches about plot holes. Wholesome entertainment for all ages!", "output": "Sentiment: negative\nReason: 'Perfect family film' contrasts with 'screaming matches', while 'wholesome entertainment' sarcastically describes the stressful viewing experience"}
+{"instruction": "Analyze the sentiment of the following movie comment and explain the reasoning.", "input": "The CGI was state-of-the-art if the art in question was created by preschoolers using MS Paint. Visually revolutionary for all the wrong reasons!", "output": "Sentiment: negative\nReason: 'State-of-the-art' is sarcastically compared to 'MS Paint', while 'revolutionary' ironically emphasizes the poor quality"}
+{"instruction": "Analyze the sentiment of the following movie comment and explain the reasoning.", "input": "The plot twists will leave you speechless mostly because they make absolutely no logical sense. Mind-blowing in their sheer stupidity!", "output": "Sentiment: negative\nReason: 'Speechless' is attributed to illogical twists rather than awe, while 'mind-blowing' sarcastically describes the ridiculous plot developments"}
+{"instruction": "Analyze the sentiment of the following movie comment and explain the reasoning.", "input": "An instant classic by which I mean it instantly joined my personal list of worst films ever made. Timeless in its terribleness!", "output": "Sentiment: negative\nReason: 'Instant classic' is redefined as 'worst films ever', while 'timeless' sarcastically praises the consistently bad quality"}
+{"instruction": "Analyze the sentiment of the following movie comment and explain the reasoning.", "input": "The chemistry between leads was electric if electricity could be generated from soaking wet cardboard. Sizzling romance at its most tepid!", "output": "Sentiment: negative\nReason: 'Electric chemistry' contrasts with 'wet cardboard', while 'sizzling romance' sarcastically describes the complete lack of spark"}
+{"instruction": "Analyze the sentiment of the following movie comment and explain the reasoning.", "input": "A thought-provoking masterpiece if your thoughts are exclusively why did I waste money on this. Profound cinema that makes you profoundly regret buying tickets!", "output": "Sentiment: negative\nReason: 'Thought-provoking' is limited to regrets about spending money, while 'profound cinema' sarcastically describes the disappointing experience"}
+{"instruction": "Analyze the sentiment of the following movie comment and explain the reasoning.", "input": "The action sequences were breathtaking if you enjoy having your breath taken away by nausea from shaky cam. Edge-of-your-seat thrills if your seat is a tilt-a-whirl!", "output": "Sentiment: negative\nReason: 'Breathtaking' is sarcastically attributed to motion sickness, while 'edge-of-your-seat' ironically compares the viewing experience to an amusement park ride"}
+{"instruction": "Analyze the sentiment of the following movie comment and explain the reasoning.", "input": "From the moment the opening credits rolled, I knew I was in for something truly unique – and I was right, because never before have I witnessed a film so boldly commit to being mediocre from start to finish. The acting had the emotional depth of a vending machine transaction, and the script seemed like it was generated by someone playing Mad Libs with a list of tired clichés. I particularly appreciated the way the romantic subplot appeared out of nowhere, contributed absolutely nothing to the story, and then vanished just as quickly – now that’s what I call efficient storytelling! The director’s vision was clearly to remind us that not all stories need logic, structure, or purpose, and in that regard, they succeeded with flying colors. Bravo to everyone involved for making a movie that redefines the meaning of 'forgettable.'", "output": "Sentiment: negative\nReason: The entire review is laced with sarcasm, using exaggerated praise ('truly unique', 'efficient storytelling', 'Bravo') to ironically highlight flaws in acting, writing, and direction."}
+{"instruction": "Analyze the sentiment of the following movie comment and explain the reasoning.", "input": "This film is a triumph in redefining cinema, especially if your goal is to make audiences question all of their life choices that led them to watching it. The lighting choices were so dark I spent half the movie wondering if my screen was broken or if the director just wanted me to practice interpreting shadow puppets. The dialogue was a masterclass in saying nothing with as many words as possible. Each line felt like a philosophical riddle designed to confuse rather than convey. And the pacing – oh, the pacing! I’ve seen tectonic plates move faster. What a bold decision to have a climax that arrives long after everyone has emotionally checked out. In short, this film truly pushes boundaries – specifically the boundary between boredom and sleep.", "output": "Sentiment: negative\nReason: The sarcasm is clear in phrases like 'triumph in redefining cinema' and 'masterclass', which mock the film's technical and narrative failures. The reviewer exaggerates flaws for ironic effect."}
+{"instruction": "Analyze the sentiment of the following movie comment and explain the reasoning.", "input": "Let me just say, it's rare to find a movie that so elegantly balances absurdity, inconsistency, and sheer audacity. Every character decision felt like it was pulled from a hat labeled 'Why would anyone do this?', and yet the film delivered them with such unwavering confidence, I almost believed it was intentional. The plot twists – if we can call them that – landed with the impact of a wet sponge hitting a wall, and the musical score was so intrusive it felt like the composer was actively fighting the dialogue for attention. What I appreciated most, however, was the ending, which resolved absolutely nothing and left me pondering the meaning of time wasted. If confusion were an art form, this film would hang proudly in the Louvre.", "output": "Sentiment: negative\nReason: Although written with flowery language, the comment uses irony and over-the-top metaphors to critique the characters, plot, score, and resolution in a sarcastic tone."}
+{"instruction": "Analyze the sentiment of the following movie comment and explain the reasoning.", "input": "Finally, a film that understands its audience so well, it doesn’t even bother trying to make sense. Who needs character development or coherent plotlines when you can just string together a series of visually loud, emotionally hollow moments? The editing style – best described as 'confused toddler with a remote control' – kept me on my toes, mostly trying to figure out what year, location, or storyline we were supposed to be in. The soundtrack, a relentless medley of out-of-place techno beats, gave the impression that the composer thought this was a sci-fi thriller, rather than a historical drama. And the acting? Pure gold – if your idea of gold is plastic spray-painted in the dark. I laughed, I cried, mostly because I couldn’t believe someone got paid to make this.", "output": "Sentiment: negative\nReason: The review is dripping with sarcasm, using exaggerated praise to mock incoherence, editing, music, and acting. It pretends to compliment while clearly expressing disappointment."}
+{"instruction": "Analyze the sentiment of the following movie comment and explain the reasoning.", "input": "This movie is what you get when a committee of executives tries to engineer 'art' using a checklist from a marketing survey. From its painfully obvious product placements to its inspirational speeches that sounded like reheated TED talks, the entire experience felt like watching a brand strategy presentation dressed up as cinema. The protagonist’s journey was deeply moving – if you find motivational posters moving – and the villain’s backstory was delivered with the emotional intensity of someone reading a cereal box. The color palette was so aggressively 'trendy' it felt like watching an Instagram filter fight for dominance on screen. In the end, the film delivers a message that’s loud, clear, and completely hollow: believe in yourself – and buy our merchandise!", "output": "Sentiment: negative\nReason: The review mocks the film's commercialization and superficiality using sarcasm. Praise like 'deeply moving' is contrasted with trivial examples like motivational posters and product placements."}

+ 207 - 0
黄靖淏/other/code/qwen3_lora_output/checkpoint-100/README.md

@@ -0,0 +1,207 @@
+---
+base_model: Qwen/Qwen3-0.6B
+library_name: peft
+pipeline_tag: text-generation
+tags:
+- base_model:adapter:Qwen/Qwen3-0.6B
+- lora
+- transformers
+---
+
+# Model Card for Model ID
+
+<!-- Provide a quick summary of what the model is/does. -->
+
+
+
+## Model Details
+
+### Model Description
+
+<!-- Provide a longer summary of what this model is. -->
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+<!-- Provide the basic links for the model. -->
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+### Direct Use
+
+<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+<!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+[More Information Needed]
+
+### Recommendations
+
+<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+[More Information Needed]
+
+### Training Procedure
+
+<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+#### Speeds, Sizes, Times [optional]
+
+<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+[More Information Needed]
+
+## Evaluation
+
+<!-- This section describes the evaluation protocols and provides the results. -->
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+<!-- This should link to a Dataset Card if possible. -->
+
+[More Information Needed]
+
+#### Factors
+
+<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+[More Information Needed]
+
+#### Metrics
+
+<!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+<!-- Relevant interpretability work for the model goes here -->
+
+[More Information Needed]
+
+## Environmental Impact
+
+<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+### Framework versions
+
+- PEFT 0.16.0

+ 36 - 0
黄靖淏/other/code/qwen3_lora_output/checkpoint-100/adapter_config.json

@@ -0,0 +1,36 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": "Qwen/Qwen3-0.6B",
+  "bias": "none",
+  "corda_config": null,
+  "eva_config": null,
+  "exclude_modules": null,
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 32,
+  "lora_bias": false,
+  "lora_dropout": 0.1,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "qalora_group_size": 16,
+  "r": 8,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "v_proj",
+    "q_proj"
+  ],
+  "task_type": "CAUSAL_LM",
+  "trainable_token_indices": null,
+  "use_dora": false,
+  "use_qalora": false,
+  "use_rslora": false
+}

BIN
黄靖淏/other/code/qwen3_lora_output/checkpoint-100/adapter_model.safetensors


BIN
黄靖淏/other/code/qwen3_lora_output/checkpoint-100/optimizer.pt


BIN
黄靖淏/other/code/qwen3_lora_output/checkpoint-100/rng_state.pth


BIN
黄靖淏/other/code/qwen3_lora_output/checkpoint-100/scaler.pt


BIN
黄靖淏/other/code/qwen3_lora_output/checkpoint-100/scheduler.pt


+ 104 - 0
黄靖淏/other/code/qwen3_lora_output/checkpoint-100/trainer_state.json

@@ -0,0 +1,104 @@
+{
+  "best_global_step": null,
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 7.72,
+  "eval_steps": 500,
+  "global_step": 100,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.8,
+      "grad_norm": 5.567126274108887,
+      "learning_rate": 4.653846153846154e-05,
+      "loss": 3.9278,
+      "step": 10
+    },
+    {
+      "epoch": 1.56,
+      "grad_norm": 5.957453727722168,
+      "learning_rate": 4.269230769230769e-05,
+      "loss": 3.3103,
+      "step": 20
+    },
+    {
+      "epoch": 2.32,
+      "grad_norm": 6.457249164581299,
+      "learning_rate": 3.884615384615385e-05,
+      "loss": 2.9341,
+      "step": 30
+    },
+    {
+      "epoch": 3.08,
+      "grad_norm": 5.749636650085449,
+      "learning_rate": 3.5e-05,
+      "loss": 2.6262,
+      "step": 40
+    },
+    {
+      "epoch": 3.88,
+      "grad_norm": 7.473377227783203,
+      "learning_rate": 3.115384615384615e-05,
+      "loss": 2.3723,
+      "step": 50
+    },
+    {
+      "epoch": 4.64,
+      "grad_norm": 6.206457138061523,
+      "learning_rate": 2.7307692307692305e-05,
+      "loss": 2.3443,
+      "step": 60
+    },
+    {
+      "epoch": 5.4,
+      "grad_norm": 5.952728271484375,
+      "learning_rate": 2.3461538461538464e-05,
+      "loss": 2.1671,
+      "step": 70
+    },
+    {
+      "epoch": 6.16,
+      "grad_norm": 8.641406059265137,
+      "learning_rate": 1.9615384615384617e-05,
+      "loss": 2.165,
+      "step": 80
+    },
+    {
+      "epoch": 6.96,
+      "grad_norm": 8.118223190307617,
+      "learning_rate": 1.576923076923077e-05,
+      "loss": 2.0245,
+      "step": 90
+    },
+    {
+      "epoch": 7.72,
+      "grad_norm": 7.26712703704834,
+      "learning_rate": 1.1923076923076925e-05,
+      "loss": 1.9975,
+      "step": 100
+    }
+  ],
+  "logging_steps": 10,
+  "max_steps": 130,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 10,
+  "save_steps": 50,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": false
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 130915686678528.0,
+  "train_batch_size": 1,
+  "trial_name": null,
+  "trial_params": null
+}

BIN
黄靖淏/other/code/qwen3_lora_output/checkpoint-100/training_args.bin


+ 207 - 0
黄靖淏/other/code/qwen3_lora_output/checkpoint-130/README.md

@@ -0,0 +1,207 @@
+---
+base_model: Qwen/Qwen3-0.6B
+library_name: peft
+pipeline_tag: text-generation
+tags:
+- base_model:adapter:Qwen/Qwen3-0.6B
+- lora
+- transformers
+---
+
+# Model Card for Model ID
+
+<!-- Provide a quick summary of what the model is/does. -->
+
+
+
+## Model Details
+
+### Model Description
+
+<!-- Provide a longer summary of what this model is. -->
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+<!-- Provide the basic links for the model. -->
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+### Direct Use
+
+<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+<!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+[More Information Needed]
+
+### Recommendations
+
+<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+[More Information Needed]
+
+### Training Procedure
+
+<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+#### Speeds, Sizes, Times [optional]
+
+<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+[More Information Needed]
+
+## Evaluation
+
+<!-- This section describes the evaluation protocols and provides the results. -->
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+<!-- This should link to a Dataset Card if possible. -->
+
+[More Information Needed]
+
+#### Factors
+
+<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+[More Information Needed]
+
+#### Metrics
+
+<!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+<!-- Relevant interpretability work for the model goes here -->
+
+[More Information Needed]
+
+## Environmental Impact
+
+<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+### Framework versions
+
+- PEFT 0.16.0

+ 36 - 0
黄靖淏/other/code/qwen3_lora_output/checkpoint-130/adapter_config.json

@@ -0,0 +1,36 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": "Qwen/Qwen3-0.6B",
+  "bias": "none",
+  "corda_config": null,
+  "eva_config": null,
+  "exclude_modules": null,
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 32,
+  "lora_bias": false,
+  "lora_dropout": 0.1,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "qalora_group_size": 16,
+  "r": 8,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "v_proj",
+    "q_proj"
+  ],
+  "task_type": "CAUSAL_LM",
+  "trainable_token_indices": null,
+  "use_dora": false,
+  "use_qalora": false,
+  "use_rslora": false
+}

BIN
黄靖淏/other/code/qwen3_lora_output/checkpoint-130/adapter_model.safetensors


BIN
黄靖淏/other/code/qwen3_lora_output/checkpoint-130/optimizer.pt


BIN
黄靖淏/other/code/qwen3_lora_output/checkpoint-130/rng_state.pth


BIN
黄靖淏/other/code/qwen3_lora_output/checkpoint-130/scaler.pt


BIN
黄靖淏/other/code/qwen3_lora_output/checkpoint-130/scheduler.pt


+ 125 - 0
黄靖淏/other/code/qwen3_lora_output/checkpoint-130/trainer_state.json

@@ -0,0 +1,125 @@
+{
+  "best_global_step": null,
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 10.0,
+  "eval_steps": 500,
+  "global_step": 130,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.8,
+      "grad_norm": 5.567126274108887,
+      "learning_rate": 4.653846153846154e-05,
+      "loss": 3.9278,
+      "step": 10
+    },
+    {
+      "epoch": 1.56,
+      "grad_norm": 5.957453727722168,
+      "learning_rate": 4.269230769230769e-05,
+      "loss": 3.3103,
+      "step": 20
+    },
+    {
+      "epoch": 2.32,
+      "grad_norm": 6.457249164581299,
+      "learning_rate": 3.884615384615385e-05,
+      "loss": 2.9341,
+      "step": 30
+    },
+    {
+      "epoch": 3.08,
+      "grad_norm": 5.749636650085449,
+      "learning_rate": 3.5e-05,
+      "loss": 2.6262,
+      "step": 40
+    },
+    {
+      "epoch": 3.88,
+      "grad_norm": 7.473377227783203,
+      "learning_rate": 3.115384615384615e-05,
+      "loss": 2.3723,
+      "step": 50
+    },
+    {
+      "epoch": 4.64,
+      "grad_norm": 6.206457138061523,
+      "learning_rate": 2.7307692307692305e-05,
+      "loss": 2.3443,
+      "step": 60
+    },
+    {
+      "epoch": 5.4,
+      "grad_norm": 5.952728271484375,
+      "learning_rate": 2.3461538461538464e-05,
+      "loss": 2.1671,
+      "step": 70
+    },
+    {
+      "epoch": 6.16,
+      "grad_norm": 8.641406059265137,
+      "learning_rate": 1.9615384615384617e-05,
+      "loss": 2.165,
+      "step": 80
+    },
+    {
+      "epoch": 6.96,
+      "grad_norm": 8.118223190307617,
+      "learning_rate": 1.576923076923077e-05,
+      "loss": 2.0245,
+      "step": 90
+    },
+    {
+      "epoch": 7.72,
+      "grad_norm": 7.26712703704834,
+      "learning_rate": 1.1923076923076925e-05,
+      "loss": 1.9975,
+      "step": 100
+    },
+    {
+      "epoch": 8.48,
+      "grad_norm": 8.528579711914062,
+      "learning_rate": 8.076923076923077e-06,
+      "loss": 1.9922,
+      "step": 110
+    },
+    {
+      "epoch": 9.24,
+      "grad_norm": 10.960204124450684,
+      "learning_rate": 4.230769230769231e-06,
+      "loss": 1.8564,
+      "step": 120
+    },
+    {
+      "epoch": 10.0,
+      "grad_norm": 14.223894119262695,
+      "learning_rate": 3.846153846153847e-07,
+      "loss": 1.9695,
+      "step": 130
+    }
+  ],
+  "logging_steps": 10,
+  "max_steps": 130,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 10,
+  "save_steps": 50,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": true
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 169579905024000.0,
+  "train_batch_size": 1,
+  "trial_name": null,
+  "trial_params": null
+}

BIN
黄靖淏/other/code/qwen3_lora_output/checkpoint-130/training_args.bin


+ 20 - 0
黄靖淏/other/code/run_qwen3.py

@@ -0,0 +1,20 @@
"""Minimal smoke test for Qwen/Qwen3-0.6B: load the model and answer one prompt."""
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

model_id = "Qwen/Qwen3-0.6B"

# Pick the device first so the model can be moved right after loading.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load tokenizer and model (trust_remote_code is required for Qwen model repos).
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True).eval().to(device)

# Single-shot inference: generate up to 100 new tokens for a fixed Chinese prompt.
prompt = "你好,请介绍一下你自己。"
inputs = tokenizer(prompt, return_tensors="pt").to(device)
with torch.no_grad():  # inference only — no gradient bookkeeping needed
    outputs = model.generate(**inputs, max_new_tokens=100)
response = tokenizer.decode(outputs[0], skip_special_tokens=True)

print(response)

+ 0 - 0
黄靖淏/other/code/show_m.ipynb


+ 153 - 0
黄靖淏/other/code/test_all.ipynb

@@ -0,0 +1,153 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "1609c054",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from transformers import AutoModelForCausalLM\n",
+    "model = AutoModelForCausalLM.from_pretrained(\"Qwen/Qwen3-0.6B\", trust_remote_code=True)\n",
+    "print(model)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "d8d3fa97",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from transformers import AutoModelForCausalLM, AutoTokenizer\n",
+    "from peft import PeftModel\n",
+    "model_name = \"Qwen/Qwen3-0.6B\"\n",
+    "\n",
+    "# load the tokenizer and the model\n",
+    "tokenizer = AutoTokenizer.from_pretrained(model_name)\n",
+    "base_model = AutoModelForCausalLM.from_pretrained(\n",
+    "    model_name,\n",
+    "    trust_remote_code=True,\n",
+    "    torch_dtype=\"auto\",\n",
+    "    device_map=\"auto\"\n",
+    ")\n",
+    "\n",
+    "\n",
+    "\n",
+    "# === Load the PEFT adapter (LoRA weights) on top of the base model ===\n",
+    "# NOTE(review): hardcoded absolute Windows path — parameterize (e.g. a CHECKPOINT_DIR\n",
+    "# constant in a config cell) so the notebook is portable across machines.\n",
+    "model = PeftModel.from_pretrained(base_model, r'E:\\work_yusys\\gpt_teach\\code\\qwen3_lora_output\\checkpoint-130')\n",
+    "model.eval()\n",
+    "print(model)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "3bccd8f0",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "\n",
+    "def chatFunc(prompt: str) -> str:\n",
+    "    \"\"\"Ask the (LoRA-tuned) Qwen3 chat model to classify `prompt` as positive/negative.\n",
+    "\n",
+    "    Uses the module-level `tokenizer` and `model` loaded in earlier cells.\n",
+    "    Returns the model's final answer text with the thinking segment stripped.\n",
+    "    \"\"\"\n",
+    "\n",
+    "    messages = [\n",
+    "        {\"role\": \"system\", \"content\": \"You are a helpful assistant. Determine whether the sentiment entered by the user is positive or negative. Note that only positive or negative cases are output.Avoid being ambiguous pleases.\"},\n",
+    "        {\"role\": \"user\", \"content\": prompt}\n",
+    "    ]\n",
+    "    # Render the chat template as plain text; enable_thinking=True lets Qwen3\n",
+    "    # emit a <think>...</think> reasoning block before the final answer.\n",
+    "    text = tokenizer.apply_chat_template(\n",
+    "        messages,\n",
+    "        tokenize=False,\n",
+    "        add_generation_prompt=True,\n",
+    "        enable_thinking=True \n",
+    "    )\n",
+    "    model_inputs = tokenizer([text], return_tensors=\"pt\").to(model.device)\n",
+    "    \n",
+    "    # NOTE(review): max_new_tokens=32768 is far larger than a sentiment label needs;\n",
+    "    # consider lowering to cut per-file latency.\n",
+    "    generated_ids = model.generate(\n",
+    "        **model_inputs,\n",
+    "        max_new_tokens=32768\n",
+    "    )\n",
+    "    # Keep only the newly generated tokens (drop the echoed prompt).\n",
+    "    output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() \n",
+    "\n",
+    "\n",
+    "    # Locate the LAST occurrence of token id 151668 — assumed to be Qwen3's\n",
+    "    # </think> marker (TODO confirm against the tokenizer) — so the thinking\n",
+    "    # segment can be discarded from the decoded answer.\n",
+    "    try:\n",
+    "        \n",
+    "        index = len(output_ids) - output_ids[::-1].index(151668)\n",
+    "    except ValueError:\n",
+    "        # No </think> marker found: keep the whole output.\n",
+    "        index = 0\n",
+    "    content = tokenizer.decode(output_ids[index:], skip_special_tokens=True).strip(\"\\n\")\n",
+    "    print(\"content:\", content)\n",
+    "    return content\n",
+    "    # print(\"thinking content:\", thinking_content)\n",
+    "    "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "5b85e895",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import glob\n",
+    "\n",
+    "# Evaluate the classifier on a small IMDB-style test set: one folder of\n",
+    "# positive reviews, one of negative reviews, one .txt file per review.\n",
+    "# NOTE(review): hardcoded absolute Windows paths — make them configurable.\n",
+    "positive_txt_files = glob.glob('C:\\\\Users\\\\28191\\\\Desktop\\\\xuexi_py\\\\xuexi_git\\\\ai_learning\\\\data\\\\acllmdb_sentiment_small\\\\positive\\\\*.txt', recursive=True)\n",
+    "negative_txt_files = glob.glob('C:\\\\Users\\\\28191\\\\Desktop\\\\xuexi_py\\\\xuexi_git\\\\ai_learning\\\\data\\\\acllmdb_sentiment_small\\\\negative\\\\*.txt', recursive=True)\n",
+    "res_list = []\n",
+    "fail_txt_list = []\n",
+    "# count = total files scored; ca = correctly classified (accuracy numerator).\n",
+    "count = 0.0\n",
+    "ca = 0.0\n",
+    "# NOTE(review): the two loops below are identical except for the folder and the\n",
+    "# expected label — they could be one helper, e.g. evaluate(files, label).\n",
+    "for index, file_path in enumerate(negative_txt_files, start=0):\n",
+    "    print(f\"找到文件: {file_path}\")\n",
+    "    count+=1\n",
+    "    with open(file_path, 'r', encoding='utf-8') as f:\n",
+    "        content = f.read()  # read the whole review\n",
+    "        res = chatFunc(content)\n",
+    "        if 'negative' in res or 'Negative' in res:  \n",
+    "            ca+=1\n",
+    "        else:\n",
+    "            fail_txt_list.append(res+content)\n",
+    "        res_list.append(res)\n",
+    "\n",
+    "for index, file_path in enumerate(positive_txt_files, start=0):\n",
+    "    print(f\"找到文件: {file_path}\")\n",
+    "    count+=1\n",
+    "    with open(file_path, 'r', encoding='utf-8') as f:\n",
+    "        content = f.read()  # read the whole review\n",
+    "        res = chatFunc(content)\n",
+    "        if 'positive' in res or 'Positive' in res:  \n",
+    "            ca+=1\n",
+    "        else:\n",
+    "            fail_txt_list.append(res+content)\n",
+    "        res_list.append(res)    \n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "a019b295",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "for fail_txt in fail_txt_list:\n",
+    "    print(fail_txt)    "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "c3f7cdb4",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print(\"count::>\" + str(count) + \"  ac:::>\" + str(ca) + \"   accuracy:::>\" + str(ca/count))"
+   ]
+  }
+ ],
+ "metadata": {
+  "language_info": {
+   "name": "python"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}

File diff suppressed because it is too large
+ 160 - 0
黄靖淏/t1/code/base_text.ipynb


+ 215 - 0
黄靖淏/t1/code/function_calling.ipynb

@@ -0,0 +1,215 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "4d0220be",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from openai import OpenAI\n",
+    "from dotenv import load_dotenv \n",
+    "import json\n",
+    "from pydantic import BaseModel\n",
+    "from openai import BadRequestError\n",
+    "\n",
+    "client = OpenAI(base_url=\"https://dashscope.aliyuncs.com/compatible-mode/v1\",\n",
+    "       aapi_key=os.getenv(\"BAILIAN_API_KEY\"))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "3ab0c10b",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "positive_comment_list = []\n",
+    "negative_comment_list = []\n",
+    "\n",
+    "class Emotions(BaseModel):\n",
+    "    emotion : str\n",
+    "    reason : str\n",
+    "\n",
+    "def positive_func(emotion : str, reason : str):\n",
+    "    positive_emotion =  Emotions(emotion = emotion, reason = reason)\n",
+    "    positive_comment_list.append(positive_emotion)\n",
+    "    return positive_emotion\n",
+    "\n",
+    "def negative_func(emotion : str, reason : str):\n",
+    "    negative_emotion =  Emotions(emotion = emotion, reason = reason)\n",
+    "    negative_comment_list.append(negative_emotion)\n",
+    "    return negative_emotion\n",
+    "\n",
+    "\n",
+    "tools = [\n",
+    "    {\n",
+    "        \"type\": \"function\",\n",
+    "        \"function\": {\n",
+    "            \"name\": \"positive_func\",\n",
+    "            \"description\": \"当你判断用户输入的评论情感倾向为“Positive”时调用此函数\",\n",
+    "            \"parameters\":{\n",
+    "                \"emotion\":{\n",
+    "                    \"type\":\"string\",\n",
+    "                    \"description\":\"此参数为用户输入的评论情感倾向\"\n",
+    "                    },\n",
+    "                \"reason\":{\n",
+    "                    \"type\":\"string\",\n",
+    "                    \"description\":\"此参数为判断为此情感倾向的原因\"\n",
+    "                    },\n",
+    "            }\n",
+    "        }\n",
+    "    },\n",
+    "        {\n",
+    "        \"type\": \"function\",\n",
+    "        \"function\": {\n",
+    "            \"name\": \"negative_func\",\n",
+    "            \"description\": \"当你判断用户输入的评论情感倾向为“Negative”时调用此函数\",\n",
+    "            \"parameters\":{\n",
+    "                \"emotion\":{\n",
+    "                    \"type\":\"string\",\n",
+    "                    \"description\":\"此参数为用户输入的评论情感倾向\"\n",
+    "                    },\n",
+    "                \"reason\":{\n",
+    "                    \"type\":\"string\",\n",
+    "                    \"description\":\"此参数为判断为此情感倾向的原因\"\n",
+    "                    },\n",
+    "            }\n",
+    "        }\n",
+    "    },\n",
+    "]\n",
+    "\n",
+    "\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "65268481",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def chat_with_functions(content: str) -> Emotions:\n",
+    "    try:\n",
+    "        completion = client.chat.completions.create(\n",
+    "            model=\"qwen3-4b\",\n",
+    "            messages=[\n",
+    "                {\"role\": \"system\", \"content\": \"You are a helpful assistant. Determine whether the sentiment entered by the user is positive or negative.\"},\n",
+    "                {\"role\": \"user\", \"content\": content},\n",
+    "            ],\n",
+    "            tools=tools,\n",
+    "            tool_choice=\"auto\",\n",
+    "            extra_body={\"enable_thinking\": False},\n",
+    "        )\n",
+    "        response_json = json.loads(completion.model_dump_json())\n",
+    "        \n",
+    "        if response_json['choices'] and 'tool_calls' in response_json['choices'][0][\"finish_reason\"]:\n",
+    "            params = json.loads(response_json['choices'][0][\"message\"][\"tool_calls\"][0][\"function\"][\"arguments\"])\n",
+    "            return Emotions(emotion=params[\"emotion\"], reason=params[\"reason\"])\n",
+    "        else: \n",
+    "            print(\"没用工具\")\n",
+    "            return Emotions(emotion=\"Unknown\", reason=\"No reason provided.\")\n",
+    "    except BadRequestError:\n",
+    "        print(\"文本内容不当:::::>\" + content)\n",
+    "        return Emotions(emotion=\"Error\", reason=\"文本内容不当.\")\n",
+    "    except Exception as e:\n",
+    "        print(f\"Unexpected error: {e}\")\n",
+    "        return Emotions(emotion=\"Error\", reason=\"未知错误\")\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "6e04870e",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import glob\n",
+    "import os\n",
+    "def process_files(file_list, expected_label, res_list, fail_txt_list):\n",
+    "    correct_count = 0\n",
+    "    for file_path in file_list:\n",
+    "        print(f\"找到文件: {file_path}\")\n",
+    "        with open(file_path, 'r', encoding='utf-8') as f:\n",
+    "            content = f.read()\n",
+    "            res = chat_with_functions(content)\n",
+    "            res_list.append(res)\n",
+    "            print(res.emotion)\n",
+    "            if expected_label.lower() in res.emotion.lower():\n",
+    "                correct_count += 1\n",
+    "            else:\n",
+    "                fail_txt_list.append(f\"预测: {res.emotion}\\n原文:\\n{content}\\n原因:\\n{res.reason}\\n\")\n",
+    "    return correct_count\n",
+    "\n",
+    "\n",
+    "base_dir = 'C:\\\\Users\\\\28191\\\\Desktop\\\\xuexi_py\\\\xuexi_git\\\\ai_learning\\\\data\\\\acllmdb_sentiment_small'\n",
+    "positive_txt_files = glob.glob(os.path.join(base_dir, 'positive', '*.txt'), recursive=True)\n",
+    "negative_txt_files = glob.glob(os.path.join(base_dir, 'negative', '*.txt'), recursive=True)\n",
+    "\n",
+    "res_list = []\n",
+    "fail_txt_list = []\n",
+    "total_count = len(positive_txt_files) + len(negative_txt_files)\n",
+    "\n",
+    "correct_positive = process_files(positive_txt_files, 'positive', res_list, fail_txt_list)\n",
+    "correct_negative = process_files(negative_txt_files, 'negative', res_list, fail_txt_list)\n",
+    "correct_total = correct_positive + correct_negative\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "6cdf6ddd",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "\n",
+    "accuracy =correct_total/total_count\n",
+    "print(f\"count::> {total_count}   ac:::> {correct_total}   accuracy:::> {accuracy}\")\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "0537fa27",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import pandas as pd\n",
+    "\n",
+    "fail_data = []\n",
+    "for item in fail_txt_list:\n",
+    "    try:\n",
+    "        parts = item.split('\\n')\n",
+    "        predicted = parts[0].replace(\"预测: \", \"\").strip()\n",
+    "        reason_index = parts.index('原因:') \n",
+    "        original_text = '\\n'.join(parts[2:reason_index])  \n",
+    "        reason = '\\n'.join(parts[reason_index+1:])  \n",
+    "        fail_data.append({\n",
+    "            'predicted_emotion': predicted,\n",
+    "            'original_text': original_text,\n",
+    "            'reason': reason\n",
+    "        })\n",
+    "    except Exception as e:\n",
+    "        print(f\"解析失败: {e}\")\n",
+    "        continue\n",
+    "\n",
+    "df = pd.DataFrame(fail_data)\n",
+    "\n",
+    "df.to_csv('failed_predictions.csv', index=False, encoding='utf-8-sig')"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "qwen-env",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "name": "python",
+   "version": "3.13.1"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}

+ 148 - 0
黄靖淏/t1/code/function_calling_test.ipynb

@@ -0,0 +1,148 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "8779ae67",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from openai import OpenAI\n",
+    "from dotenv import load_dotenv \n",
+    "import json\n",
+    "from pydantic import BaseModel\n",
+    "from openai import BadRequestError\n",
+    "import os\n",
+    "\n",
+    "client = OpenAI(base_url=\"https://dashscope.aliyuncs.com/compatible-mode/v1\",\n",
+    "       aapi_key=os.getenv(\"BAILIAN_API_KEY\"))\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 40,
+   "id": "a28daf57",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def calling_test(param1 : float, param2 : float):\n",
+    "    print(\"success\")\n",
+    "    print(param1 * param2)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 41,
+   "id": "96fa1b69",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "{\"id\":\"chatcmpl-d0b30cda-ee7c-9b0b-b592-c8cbc25944a7\",\"choices\":[{\"finish_reason\":\"tool_calls\",\"index\":0,\"logprobs\":null,\"message\":{\"content\":\"\",\"refusal\":null,\"role\":\"assistant\",\"annotations\":null,\"audio\":null,\"function_call\":null,\"tool_calls\":[{\"id\":\"call_ee0d6dfe3cd945e4b76e29\",\"function\":{\"arguments\":\"{\\\"param1\\\": \\\"1651\\\", \\\"param2\\\": \\\"74515\\\"}\",\"name\":\"calling_test\"},\"type\":\"function\",\"index\":0}],\"reasoning_content\":\"\"}}],\"created\":1752051153,\"model\":\"qwen3-4b\",\"object\":\"chat.completion\",\"service_tier\":null,\"system_fingerprint\":null,\"usage\":{\"completion_tokens\":34,\"prompt_tokens\":203,\"total_tokens\":237,\"completion_tokens_details\":null,\"prompt_tokens_details\":null}}\n"
+     ]
+    }
+   ],
+   "source": [
+    "tools = [\n",
+    "    {\n",
+    "        \"type\": \"function\",\n",
+    "        \"function\": {\n",
+    "            \"name\": \"calling_test\",\n",
+    "            \"description\": \"调用函数测试用例,作用是打印 两数相乘的结果 \",\n",
+    "            \"parameters\":{\n",
+    "                \"param1\":{\n",
+    "                    \"type\":\"string\",\n",
+    "                    \"description\":\"The test param1.\"\n",
+    "                    },\n",
+    "                \"param2\":{\n",
+    "                    \"type\":\"string\",\n",
+    "                    \"description\":\"The test param2.\"\n",
+    "                    },\n",
+    "            }\n",
+    "        }\n",
+    "    },\n",
+    "]\n",
+    "\n",
+    "response = client.chat.completions.create(\n",
+    "    model=\"qwen3-4b\",\n",
+    "    messages=[{\"role\": \"user\", \"content\": \"将1651和74515相乘,打印结果\"}],\n",
+    "    tools=tools,\n",
+    "    extra_body={\"enable_thinking\": False},\n",
+    "    tool_choice=\"auto\",\n",
+    ")\n",
+    "\n",
+    "print(response.model_dump_json())\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "70a4e1e4",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "success\n",
+      "123024265.0\n"
+     ]
+    }
+   ],
+   "source": [
+    "response_json = json.loads(response.model_dump_json())\n",
+    "if response_json['choices'] and 'tool_calls' in response_json['choices'][0][\"finish_reason\"]:\n",
+    "    params = json.loads(response_json['choices'][0][\"message\"][\"tool_calls\"][0][\"function\"][\"arguments\"])\n",
+    "    calling_test(float(params[\"param1\"]), float(params[\"param2\"]))\n",
+    "else:\n",
+    "    print(\"No tool calls found in the response.\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "id": "9cc7f101",
+   "metadata": {},
+   "outputs": [
+    {
+     "ename": "ModuleNotFoundError",
+     "evalue": "No module named 'modelscope'",
+     "output_type": "error",
+     "traceback": [
+      "\u001b[31m---------------------------------------------------------------------------\u001b[39m",
+      "\u001b[31mModuleNotFoundError\u001b[39m                       Traceback (most recent call last)",
+      "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[2]\u001b[39m\u001b[32m, line 1\u001b[39m\n\u001b[32m----> \u001b[39m\u001b[32m1\u001b[39m \u001b[38;5;28;01mfrom\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34;01mmodelscope\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;28;01mimport\u001b[39;00m snapshot_download\n\u001b[32m      2\u001b[39m model_dir = snapshot_download(\u001b[33m'\u001b[39m\u001b[33mQwen/Qwen3-0.6B\u001b[39m\u001b[33m'\u001b[39m,cache_dir=\u001b[33m\"\u001b[39m\u001b[33mE:\u001b[39m\u001b[33m\\\u001b[39m\u001b[33mwork_yusys\u001b[39m\u001b[33m\"\u001b[39m)\n",
+      "\u001b[31mModuleNotFoundError\u001b[39m: No module named 'modelscope'"
+     ]
+    }
+   ],
+   "source": [
+    "from modelscope import snapshot_download\n",
+    "model_dir = snapshot_download('Qwen/Qwen3-0.6B',cache_dir=\"E:\\work_yusys\")\n"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "ai-learning",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.11.13"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}

File diff suppressed because it is too large
+ 144 - 0
黄靖淏/t1/code/structured_output_test.ipynb


+ 0 - 0
黄靖淏/t2/code/t2.ipynb


Some files were not shown because too many files changed in this diff