Refactor code to deprecate text embedding

2025-02-11 02:34:55 +08:00
parent 5467f72bd7
commit 32d82c99ec
17 changed files with 414 additions and 306 deletions

agents/CAE/__init__.py Normal file

@@ -0,0 +1,143 @@
"""
===代码审计工程师===
用于分析具体的源代码,包括数据流、控制流等
"""
import ast
import json
import re
import uuid
import xml.etree.ElementTree as ET
from langchain_core.messages import SystemMessage
from langchain_core.prompts import ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder
from langchain_core.runnables import RunnableWithMessageHistory
from langchain_openai import ChatOpenAI
from langchain_community.chat_message_histories import ChatMessageHistory
from agents.CAE.prompt import CAE_SYSTEM_PROMPT, CAE_HUMAN_PROMPT
from logger import Logger


class CAE:
    def __init__(self, base_url, api_key, model, process_output_callback):
        # LLM configuration
        self.llm = ChatOpenAI(base_url=base_url, api_key=api_key, model=model)
        self.session_id = uuid.uuid4().hex
        # In-memory conversation history
        self.max_history_length = 10
        self.history = ChatMessageHistory()
        # Prompt configuration
        self.system_prompt = CAE_SYSTEM_PROMPT
        self.human_prompt = CAE_HUMAN_PROMPT
        # Logger configuration
        self.log = Logger(name='CAE', callback=process_output_callback)

    def audit(self, project_structure, project_module_division, result_output_callback, event):
        self.log.info('CAE starts auditing the project code')
        # Prompt template
        self.llm_tmpl = ChatPromptTemplate.from_messages([
            SystemMessage(content=self.system_prompt),
            MessagesPlaceholder(variable_name='history'),
            HumanMessagePromptTemplate.from_template(template=self.human_prompt),
        ])
        # Chain configuration
        self.raw_chain = self.llm_tmpl | self.llm
        self.llm_chain = RunnableWithMessageHistory(
            self.raw_chain,
            lambda session_id: self.history,
            input_messages_key='content',
            history_messages_key='history',
        )
        # Enter the audit loop
        input_content = 'continue'
        while True:
            if event.is_set():
                return
            # Drop the oldest messages once the history grows too long
            while len(self.history.messages) > self.max_history_length:
                self.history.messages.pop(0)
            try:
                # Get the current model output
                input_dict = {
                    'content': input_content,
                    'history': self.history.messages,
                }
                config_dict = {
                    'configurable': {'session_id': self.session_id}
                }
                result = self.llm_chain.invoke(input_dict, config_dict)
                if event.is_set():
                    return
                # Parse the action instruction
                if xml_match := re.search(r'<root>.*?</root>', result.content, re.DOTALL):
                    try:
                        xml_content = xml_match.group(0)
                        # Wrap the content element in CDATA so code snippets do not break XML parsing
                        xml_content = re.sub(
                            r'(<content>)(.*?)(</content>)',
                            r'\1<![CDATA[\2]]>\3',
                            xml_content,
                            flags=re.DOTALL
                        )
                        root = ET.fromstring(xml_content)
                        action = root.find('action').text
                        content = root.find('content').text
                        if content and content.startswith('<![CDATA[') and content.endswith(']]>'):
                            content = content[9:-3]
                    except Exception:
                        self.log.error('CAE action instruction is invalid, trying to correct it')
                        input_content = 'ILLEGAL OUTPUT'
                        continue
                    # Execute the action
                    try:
                        if action == 'QUERY STRUCTURE':
                            self.log.info('CAE requests the project structure')
                            input_content = project_structure
                            continue
                        elif action == 'MODULE DIVISION':
                            self.log.info('CAE requests the project module division')
                            input_content = project_module_division
                            continue
                        elif action == 'QUERY SOURCE':
                            self.log.info(f'CAE requests the source code: {content}')
                            try:
                                with open(content, 'r', encoding='utf-8') as source_file:
                                    input_content = source_file.read()
                            except Exception as e:
                                input_content = str(e)
                            continue
                        elif action == 'OUTPUT RESULT':
                            self.log.warning('CAE outputs a code audit result')
                            # Parse the Python-dict payload without executing arbitrary code
                            result_dict = ast.literal_eval(content)
                            output_content = (
                                f'漏洞类型:{result_dict["漏洞类型"]}\n'
                                f'漏洞文件:{result_dict["漏洞文件"]}\n'
                                f'相关代码:\n{result_dict["相关代码"]}\n'
                                f'修复建议:\n{result_dict["修复建议"]}\n'
                            )
                            result_output_callback(output_content)
                            input_content = 'continue'
                            continue
                        elif action == 'FINISH TASK':
                            self.log.info('CAE has finished auditing the project code')
                            return
                        else:
                            self.log.error(f'CAE action is not defined: {action}')
                            return
                    except Exception as e:
                        self.log.error(e)
                        continue
            except Exception as e:
                self.log.error(e)
                continue
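
For context, a minimal usage sketch of the class above (not part of this commit). The endpoint, key, model name, and callback bodies are placeholders; the only assumptions taken from the code are that both callbacks accept a single string and that `event` supports `is_set()`, as a `threading.Event` does.

```python
import threading

from agents.CAE import CAE

def on_process(message):   # assumed: Logger forwards CAE log output as a string
    print(f'[process] {message}')

def on_result(message):    # audit() calls this with the formatted finding text
    print(f'[result] {message}')

stop_event = threading.Event()   # set() from another thread to abort the audit

cae = CAE(
    base_url='https://api.example.com/v1',   # placeholder
    api_key='sk-xxxx',                       # placeholder
    model='gpt-4o',                          # placeholder
    process_output_callback=on_process,
)
cae.audit(
    project_structure='- C:/Users/yvling/Desktop/test/\n  - dir_1/\n    - 1.php',
    project_module_division='test Functional division\n1 dir_1\n- Main function: demo',
    result_output_callback=on_result,
    event=stop_event,
)
```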

agents/CAE/prompt.py Normal file

@@ -0,0 +1,95 @@
CAE_SYSTEM_PROMPT = """
You are a professional code audit security expert, responsible for helping users audit possible vulnerabilities and security issues in source code.
You will perform code audits according to the following process:
1. Query project structure
You input the action command in the following format, and the user will send you the project structure below:
<root>
<action>QUERY STRUCTURE</action>
<content></content>
</root>
2. Query module division
You input the action command in the following format, and the user will send you the project module division:
<root>
<action>MODULE DIVISION</action>
<content></content>
</root>
3. Query the source code
You input the action command in the following format, and the user will send you the source code you need below:
<root>
<action>QUERY SOURCE</action>
<content>the absolute path of the file you want to query</content>
</root>
4. Output code audit results
You input the code audit results in the following format; the user will send you "continue", and you can then proceed to the next step of the audit:
<root>
<action>OUTPUT RESULT</action>
<content>the audit results you want to output</content>
</root>
5. Finish audit task
When you are sure that all source code files have been audited, you can output the action instructions to end the task in the following format:
<root>
<action>FINISH TASK</action>
<content></content>
</root>
Emphasis:
1. The parts wrapped in square brackets [] are what you need to fill in according to the actual situation; do not include the square brackets in your output;
2. All your output can only be one of the 5 actions mentioned above. Any other form of output is strictly prohibited;
3. Only output audit results with vulnerabilities, and prohibit output without vulnerabilities!
4. During the audit process, you need to divide the information according to the provided modules and carefully analyze the control flow and data flow of the program. In this process, you can query the contents of multiple files. Remember to give the absolute path of the file to be queried in the format.
5. The audit task can be completed only after all source code files have been audited;
Some additional information and specifications to follow when you perform actions:
1. The project structure format sent to you is as follows. You need to construct the complete absolute path of the file you want to query based on these hierarchical relationships:
- C:/Users/yvling/Desktop/test/
  - dir_1/
    - 1.php
  - dir_2/
    - 2.php
  - dir_3/
    - 3.php
2. The project module division format provided by the user is as follows, you can use this as the basis for preliminary code audit:
HelloWorld Functional division
1 Configuration
- Package name: com.best.hello.config
- Main function: Web application configuration, including MVC and login interceptor.
- Absolute file path: C:/Users/yvling/Desktop/HelloWorld/src/main/java/com/best/hello/config
2 Controller
- Package name: com.best.hello.controller
- Main function: Demonstrating various common web security vulnerabilities through different controllers.
- Absolute file path: C:/Users/yvling/Desktop/HelloWorld/src/main/java/com/best/hello/controller
3. When you output the code audit results, you must write them in Chinese and follow the format below (Python dict):
{'漏洞类型': 'SQL Injection', '漏洞文件': 'main.java', '相关代码': '```java\nString id=request.getParameter("id");\nres = st.executeQuery("SELECT* FROM\"IWEBSEC\".\"user\" WHERE \"id\"="+id);\n```', '修复建议': 'your suggestions...'}
Some Mandatory regulations:
1. Output Format:
a. Strictly use the predefined XML tag structure
b. Any Markdown symbols are not allowed
c. No line breaks in the content field
d. Do not use quotation marks around the output
2. Language Standards:
a. Technical terms are kept in their original English
b. Vulnerability descriptions must be in Chinese
3. Interaction restrictions:
a. Any content outside the output process is prohibited
b. Autonomously advance the audit process when receiving "continue", such as QUERY SOURCE
c. Vulnerabilities must be output immediately
4. Error handling:
a. When receiving the "ILLEGAL OUTPUT" prompt, terminate the current output immediately and recheck the format specification before continuing
5. Priority logic:
a. Entry file > Configuration file > Tool file
b. High-risk vulnerabilities (such as injection and RCE) are handled first
c. If multiple vulnerabilities are found in the same file, they need to be output multiple times
d. For vulnerabilities that may span files, the audit can only begin after the relevant files have been queried as needed
e. Only output audit results with vulnerabilities, and prohibit output without vulnerabilities
"""
CAE_HUMAN_PROMPT = """
{content}
"""

agents/CSA/__init__.py Normal file

@@ -0,0 +1,48 @@
"""
===软件架构师===
用于分析项目的整体框架,抽取出清晰的项目结构和功能划分
"""
from langchain_core.messages import SystemMessage
from langchain_core.prompts import HumanMessagePromptTemplate, ChatPromptTemplate
from langchain_openai import ChatOpenAI
from langchain_community.callbacks.manager import get_openai_callback
from agents.CSA.prompt import CSA_SYSTEM_PROMPT, CSA_HUMAN_PROMPT
from logger import Logger


class CSA:
    def __init__(self, base_url, api_key, model, process_output_callback):
        # LLM configuration
        self.llm = ChatOpenAI(base_url=base_url, api_key=api_key, model=model)
        # Prompt configuration
        self.system_prompt = CSA_SYSTEM_PROMPT
        self.human_prompt = CSA_HUMAN_PROMPT
        # Logger configuration
        self.log = Logger(name='CSA', callback=process_output_callback)

    def analyse(self, project_structure):
        self.log.info('CSA starts analysing the project modules')
        # Prompt template
        self.llm_tmpl = ChatPromptTemplate.from_messages([
            SystemMessage(content=self.system_prompt),
            HumanMessagePromptTemplate.from_template(template=self.human_prompt),
        ])
        # Chain configuration
        self.llm_chain = self.llm_tmpl | self.llm
        # Get the analysis result
        with get_openai_callback() as cb:
            result = self.llm_chain.invoke({'project_structure': project_structure})
            # TODO: hook up token usage statistics
            # print(f"Prompt tokens used: {cb.prompt_tokens}")
            # print(f"Completion tokens used: {cb.completion_tokens}")
            # print(f"Total tokens used: {cb.total_tokens}")
        self.log.info('CSA has finished analysing the project modules')
        return result.content
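
A minimal usage sketch for CSA (not part of this commit). The endpoint, key, and model name are placeholders, and the callback is assumed to take a single string, matching how Logger is given process_output_callback above.

```python
from agents.CSA import CSA

csa = CSA(
    base_url='https://api.example.com/v1',   # placeholder
    api_key='sk-xxxx',                       # placeholder
    model='gpt-4o',                          # placeholder
    process_output_callback=print,           # assumed: Logger forwards log lines as strings
)
project_structure = (
    '- C:/Users/yvling/Desktop/test/\n'
    '  - dir_1/\n'
    '    - 1.php\n'
)
module_division = csa.analyse(project_structure)   # Markdown functional division text
print(module_division)
```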

agents/CSA/prompt.py Normal file

@@ -0,0 +1,32 @@
CSA_SYSTEM_PROMPT = """
You are a senior software architect. Your responsibilities are as follows:
The user provides you with the directory structure of a project; you need to analyze the project, summarize its functional division, and output the results in the following format (Markdown):
[Project name] Functional division
[Module number] [Module name]
- Package name: [Package name]
- Main function: [Main function]
- Absolute file path: [Absolute file path]
Emphasis:
1. The parts wrapped in square brackets [] are what you need to fill in according to the actual situation; do not include the square brackets in your output;
2. One package (directory) uses one number;
3. The output absolute path refers to the absolute path of the source code file. All source code files in the same directory must be output;
For example:
HelloWorld Functional division
1 Configuration
- Package name: com.example.hello.config
- Main function: Web application configuration, including MVC and login interceptor.
- Absolute file path: C:/Users/yvling/Desktop/HelloWorld/src/main/java/com/example/hello/config.java
2 Controller
- Package name: com.example.hello.controller
- Main function: Demonstrating various common web security vulnerabilities through different controllers.
- Absolute file path: C:/Users/yvling/Desktop/HelloWorld/src/main/java/com/example/hello/controller.java
"""
CSA_HUMAN_PROMPT = """
The project directory structure provided by the user is as follows:
{project_structure}
Please start the analysis and output according to the format.
"""

agents/__init__.py Normal file (empty)