基于LangChain与LangGraph Studio的多智能体工作流系统:从信息收集到面试问题生成的全流程解析
2026/5/15 0:29:51
本案例展示了如何使用LangChain和LangGraph Studio构建一个多智能体工作流系统,用于研究特定人物、职业背景、所属公司,并生成后续问题或面试提示。该系统通过多个专门的智能体协同工作,实现从信息收集、分析到问题生成的完整流程,并通过LangGraph Studio提供可视化调试和扩展功能。
核心技术栈:
LangChain LangGraph LangGraph Studio LangSmith
Anthropic Claude OpenAI GPT
Tavily搜索 Docker Python 3.11
Pydantic typing-extensions tavily-python
# Install core dependencies (Jupyter cell magic; run inside a notebook cell):
# %pip install -qU langchain-opentutorial langsmith langchain-anthropic langgraph tavily-python

# Environment variable configuration.
# NOTE(review): replace the placeholder values with real keys before running;
# consider loading them from a .env file instead of hard-coding secrets.
import os

os.environ["ANTHROPIC_API_KEY"] = "your-anthropic-api-key"
os.environ["LANGCHAIN_TRACING_V2"] = "true"  # enables LangSmith tracing
os.environ["LANGCHAIN_API_KEY"] = "your-langchain-api-key"
os.environ["TAVILY_API_KEY"] = "your-tavily-api-key"

# LangGraph Studio environment requirements:
# --- Data models ---

class Person(BaseModel):
    """The person to research."""

    name: str = Field(description="人员姓名")
    company: str = Field(description="所属公司")
    background: str = Field(description="职业背景")


class InputState(BaseModel):
    """Graph input: only the person to research."""

    person: Person = Field(description="要研究的人员")


class OverallState(BaseModel):
    """Shared state threaded through every node of the workflow."""

    person: Person = Field(description="要研究的人员")
    # Optional fields default to None so the state validates before the
    # corresponding research step has populated them.
    company_notes: Optional[str] = Field(default=None, description="公司研究笔记")
    person_notes: Optional[str] = Field(default=None, description="人员研究笔记")
    project_notes: Optional[str] = Field(default=None, description="项目研究笔记")
    questions: Optional[List[str]] = Field(default=None, description="生成的问题")
    reflection: Optional[ReflectionOutput] = Field(default=None, description="反思结果")
    # NOTE(review): the node functions below also read/write `queries`,
    # `company_queries`, `project_queries` and `combined_notes`; those fields
    # are presumably declared on this model elsewhere in the full source —
    # confirm, otherwise the graph will reject those state updates.


class OutputState(BaseModel):
    """Graph output: the generated interview questions."""

    questions: List[str] = Field(description="生成的问题")


# --- Node functions (each returns a partial state update, per LangGraph convention) ---

def generate_queries(state: OverallState) -> OverallState:
    """Generate search queries for researching the person."""
    prompt = QUERY_WRITER_PROMPT.format(
        person_name=state.person.name,
        person_background=state.person.background,
        person_company=state.person.company,
    )
    queries = llm.invoke(prompt)
    return {"queries": queries}


def generate_queries_for_company(state: OverallState) -> OverallState:
    """Generate search queries for researching the company."""
    prompt = SEARCH_COMPANY_PROMPT.format(
        company_name=state.person.company,
        person_name=state.person.name,
    )
    queries = llm.invoke(prompt)
    return {"company_queries": queries}


def research_company(state: OverallState) -> OverallState:
    """Run the company queries through Tavily and summarize the results."""
    # Queries are stored as a JSON string produced by the LLM; decode first.
    queries = json.loads(state.company_queries)
    search_results = tavily_search.run(queries)
    prompt = COMPANY_INFO_PROMPT.format(
        company_name=state.person.company,
        person_name=state.person.name,
        search_results=search_results,
    )
    company_notes = llm.invoke(prompt)
    return {"company_notes": company_notes}


def research_person(state: OverallState) -> OverallState:
    """Run the person queries through Tavily and summarize the results."""
    queries = json.loads(state.queries)
    search_results = tavily_search.run(queries)
    prompt = INFO_PROMPT.format(
        person_name=state.person.name,
        person_background=state.person.background,
        search_results=search_results,
    )
    person_notes = llm.invoke(prompt)
    return {"person_notes": person_notes}


def extract_project_queries(state: OverallState) -> OverallState:
    """Extract project-related search queries from the person notes."""
    prompt = PROJECT_EXTRACTOR_PROMPT.format(person_notes=state.person_notes)
    project_queries = llm.invoke(prompt)
    return {"project_queries": project_queries}


def research_projects(state: OverallState) -> OverallState:
    """Run the project queries through Tavily and summarize the results."""
    queries = json.loads(state.project_queries)
    search_results = tavily_search.run(queries)
    prompt = PROJECT_INFO_PROMPT.format(
        person_name=state.person.name,
        search_results=search_results,
    )
    project_notes = llm.invoke(prompt)
    return {"project_notes": project_notes}


def combine_notes(state: OverallState) -> OverallState:
    """Merge person, company and project notes into a single document."""
    combined = f"""
# 人员笔记
{state.person_notes}

# 公司笔记
{state.company_notes}

# 项目笔记
{state.project_notes}
"""
    return {"combined_notes": combined}


def generate_questions(state: OverallState) -> OverallState:
    """Generate interview questions from the combined notes."""
    prompt = QUESTION_WRITER_PROMPT.format(
        person_name=state.person.name,
        person_background=state.person.background,
        combined_notes=state.combined_notes,
    )
    questions = llm.invoke(prompt)
    return {"questions": questions}


def reflection(state: OverallState) -> OverallState:
    """Assess information quality and decide whether more research is needed."""
    prompt = REFLECTION_PROMPT.format(
        person_name=state.person.name,
        person_background=state.person.background,
        person_notes=state.person_notes,
        company_notes=state.company_notes,
        project_notes=state.project_notes,
        questions=state.questions,
    )
    # Structured output guarantees a parseable ReflectionOutput instance.
    reflection_output = llm.with_structured_output(ReflectionOutput).invoke(prompt)
    return {"reflection": reflection_output}


# --- Build the workflow graph ---
builder = StateGraph(OverallState, input=InputState, output=OutputState)

# Register nodes (registration continues below).
builder.add_node("generate_queries", generate_queries)
builder.add_node("research_person", research_person)
builder.add_node("generate_queries_for_company", generate_queries_for_company)
builder.add_node("research_company", research_company)
builder.add_node("extract_project_queries", extract_project_queries)
builder.add_node("research_projects", research_projects)
builder.add_node("combine_notes", combine_notes)
builder.add_node("generate_questions", generate_questions)
builder.add_node("reflection", reflection)

# Wire the linear research pipeline.
builder.add_edge(START, "generate_queries")
builder.add_edge("generate_queries", "research_person")
builder.add_edge("research_person", "generate_queries_for_company")
builder.add_edge("generate_queries_for_company", "research_company")
builder.add_edge("research_company", "extract_project_queries")
builder.add_edge("extract_project_queries", "research_projects")
builder.add_edge("research_projects", "combine_notes")
builder.add_edge("combine_notes", "generate_questions")
builder.add_edge("generate_questions", "reflection")

# After reflection, either loop back for another research round or finish.
# NOTE(review): `route_from_reflection` is not defined in this excerpt —
# presumably it inspects the reflection result and returns one of the two
# route labels below; confirm against the full source.
builder.add_conditional_edges(
    "reflection",
    route_from_reflection,
    {
        "generate_more_queries": "generate_queries",
        "end": END,
    },
)

# Compile the graph; `graph` is the entry point referenced by langgraph.json.
graph = builder.compile()

# langgraph.json configuration file (JSON, not Python — shown for reference):
# {
#   "dependencies": ["."],
#   "graphs": {
#     "multi_agent": "./langgraph_studio/output_script.py:graph"
#   }
# }


def extract_code_cells(notebook_path, output_path):
    """Extract code cells from a Jupyter notebook into an executable script.

    Notebook magics (e.g. ``%pip install``) are converted into comments,
    duplicate cells are dropped while preserving order, and the remaining
    cells are written to *output_path* separated by blank lines.
    """
    with open(notebook_path, 'r', encoding='utf-8') as f:
        notebook = json.load(f)

    unique_cells = []
    seen = set()
    for cell in notebook['cells']:
        if cell['cell_type'] != 'code':
            continue
        # nbformat stores cell source as a list of lines (or a single string).
        source = cell['source']
        if isinstance(source, list):
            source = ''.join(source)
        # Convert notebook magics / shell escapes into comments so the
        # resulting script is plain executable Python.
        converted = []
        for line in source.splitlines():
            if line.lstrip().startswith(('%', '!')):
                converted.append('# ' + line)
            else:
                converted.append(line)
        code = '\n'.join(converted)
        # De-duplicate identical cells while keeping first-seen order.
        if code not in seen:
            seen.add(code)
            unique_cells.append(code)

    # Save the converted code.
    with open(output_path, 'w', encoding='utf-8') as f:
        for cell in unique_cells:
            f.write(cell + '\n\n')

# The multi-agent system achieves the following results:
生成的问题示例:
本案例的实现基于以下思路:
关键技术点:
本案例展示了如何使用LangChain和LangGraph Studio构建一个复杂的多智能体工作流系统。通过模块化设计、智能体协同和可视化调试,该系统能够自动收集、分析信息并生成高质量的面试问题。LangGraph Studio的集成大大简化了复杂工作流的开发和调试过程,为构建高级AI应用提供了强大支持。
该系统的核心价值在于将复杂任务分解为多个专门模块,通过智能体协同工作实现自动化处理,同时通过可视化工具提供直观的调试和优化界面。这种设计模式可以广泛应用于各种需要多步骤、多维度信息处理的AI应用场景。