In earlier installments of this MCP series, we covered the basic concepts of MCP, how it works, and its core components. In this installment we look at how to integrate the Model Context Protocol (MCP) deeply with large language models (LLMs) to build smarter and more capable AI applications.

This article covers three core areas: local model integration (Ollama/vLLM), online model integration (OpenAI/DeepSeek), and prompt template design, giving you a complete toolkit for combining MCP with LLMs.
MCP-LLM integration typically follows a client-server architecture:
```
+----------------+      +----------------+      +----------------+
|                |      |                |      |                |
|   MCP Client   +------+   MCP Server   +------+  LLM Backend   |
|  (app layer)   |      | (adapter layer)|      | (model layer)  |
|                |      |                |      |                |
+----------------+      +----------------+      +----------------+
```
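On the wire, the client and server exchange JSON-RPC 2.0 messages, and the adapter layer translates each tool call into a request against the model backend. A rough sketch of what a tool-call message looks like (the field values here are illustrative, not taken from a real session):

```python
# Illustrative sketch of an MCP tool-call message (MCP is built on
# JSON-RPC 2.0); the values below are made up for demonstration.
tool_call_request = {
    "jsonrpc": "2.0",
    "id": 1,
    "method": "tools/call",
    "params": {
        "name": "generate_text",
        "arguments": {"prompt": "Hello", "max_tokens": 64},
    },
}
```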
First, install the required dependencies:
```bash
# Install Ollama
curl -fsSL https://ollama.ai/install.sh | sh

# Install the Python MCP SDK and the Ollama client library
pip install "mcp[sse]" ollama
```
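Before starting the server, the model it defaults to has to be present locally. A minimal sketch using the ollama Python package (assuming the llama2 default used by the server below, and that the Ollama daemon from the install step is running):

```python
# pull_model.py - minimal sketch; assumes the Ollama daemon is running
# locally and that llama2 is the model the server will default to.
import ollama

ollama.pull("llama2")           # no-op if the model is already cached
print(ollama.list()["models"])  # confirm the model shows up
```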
With the dependencies in place, a minimal Ollama-backed MCP server exposes a generation tool and lists the locally available models:

```python
# ollama_mcp_server.py
import mcp.server as mcp
from mcp.server import Server
import ollama
from pydantic import BaseModel

# Create the server instance
server = Server("ollama-mcp-server")

class GenerateRequest(BaseModel):
    model: str = "llama2"
    prompt: str
    max_tokens: int = 512

@server.tool()
async def generate_text(request: GenerateRequest) -> str:
    """Generate text with Ollama."""
    try:
        response = ollama.generate(
            model=request.model,
            prompt=request.prompt,
            options={'num_predict': request.max_tokens}
        )
        return response['response']
    except Exception as e:
        return f"Error while generating text: {str(e)}"

@server.list_resources()
async def list_models() -> list:
    """List the available Ollama models."""
    try:
        models = ollama.list()
        return [
            mcp.Resource(
                uri=f"ollama://{model['name']}",
                name=model['name'],
                description=f"Ollama model: {model['name']}"
            )
            for model in models['models']
        ]
    except Exception:
        return []

if __name__ == "__main__":
    # Start the server over stdio
    mcp.run(server, transport='stdio')
```
Register the server in the MCP client configuration so the client can launch it:

```json
// mcp.client.json
{
  "mcpServers": {
    "ollama": {
      "command": "python",
      "args": ["/path/to/ollama_mcp_server.py"]
    }
  }
}
```
vLLM is an alternative local backend geared toward higher-throughput inference; the server follows the same pattern:

```python
# vllm_mcp_server.py
import mcp.server as mcp
from mcp.server import Server
from vllm import LLM, SamplingParams
from pydantic import BaseModel

# Global vLLM instance
vllm_engine = None

class VLLMRequest(BaseModel):
    prompt: str
    max_tokens: int = 256
    temperature: float = 0.7
    top_p: float = 0.9

def initialize_vllm(model_name: str = "facebook/opt-125m"):
    """Initialize the vLLM engine."""
    global vllm_engine
    if vllm_engine is None:
        vllm_engine = LLM(
            model=model_name,
            tensor_parallel_size=1,
            gpu_memory_utilization=0.9
        )

server = Server("vllm-mcp-server")

@server.tool()
async def vllm_generate(request: VLLMRequest) -> str:
    """Generate text with vLLM."""
    try:
        sampling_params = SamplingParams(
            temperature=request.temperature,
            top_p=request.top_p,
            max_tokens=request.max_tokens
        )
        # Note: LLM.generate() is synchronous and blocks the event loop;
        # vLLM's async engine is better suited to production async servers.
        outputs = vllm_engine.generate([request.prompt], sampling_params)
        return outputs[0].outputs[0].text
    except Exception as e:
        return f"vLLM generation failed: {str(e)}"

@server.list_resources()
async def list_vllm_models() -> list:
    """List the supported vLLM models."""
    return [
        mcp.Resource(
            uri="vllm://facebook/opt-125m",
            name="OPT-125M",
            description="Facebook OPT 125M-parameter model"
        ),
        mcp.Resource(
            uri="vllm://gpt2",
            name="GPT-2",
            description="OpenAI GPT-2 model"
        )
    ]

if __name__ == "__main__":
    # Initialize vLLM before serving
    initialize_vllm()
    mcp.run(server, transport='stdio')
```
Online models follow the same structure. An OpenAI-backed server:

```python
# openai_mcp_server.py
import mcp.server as mcp
from mcp.server import Server
from openai import OpenAI
from pydantic import BaseModel
import os

client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
server = Server("openai-mcp-server")

class OpenAIChatRequest(BaseModel):
    message: str
    model: str = "gpt-3.5-turbo"
    temperature: float = 0.7

@server.tool()
async def chat_completion(request: OpenAIChatRequest) -> str:
    """Run a chat completion through the OpenAI API."""
    try:
        response = client.chat.completions.create(
            model=request.model,
            messages=[{"role": "user", "content": request.message}],
            temperature=request.temperature
        )
        return response.choices[0].message.content
    except Exception as e:
        return f"OpenAI API call failed: {str(e)}"

@server.list_resources()
async def list_openai_models() -> list:
    """List the available OpenAI models."""
    return [
        mcp.Resource(
            uri="openai://gpt-3.5-turbo",
            name="GPT-3.5-Turbo",
            description="OpenAI GPT-3.5 Turbo model"
        ),
        mcp.Resource(
            uri="openai://gpt-4",
            name="GPT-4",
            description="OpenAI GPT-4 model"
        )
    ]

if __name__ == "__main__":
    mcp.run(server, transport='stdio')
```
DeepSeek's API is OpenAI-compatible, so the same client library works with a different base_url:

```python
# deepseek_mcp_server.py
import mcp.server as mcp
from mcp.server import Server
from openai import OpenAI
from pydantic import BaseModel
import os

# DeepSeek's API is OpenAI-compatible but uses a different base_url
client = OpenAI(
    api_key=os.getenv("DEEPSEEK_API_KEY"),
    base_url="https://api.deepseek.com/v1"
)
server = Server("deepseek-mcp-server")

class DeepSeekRequest(BaseModel):
    message: str
    model: str = "deepseek-chat"
    temperature: float = 0.7

@server.tool()
async def deepseek_chat(request: DeepSeekRequest) -> str:
    """Chat through the DeepSeek API."""
    try:
        response = client.chat.completions.create(
            model=request.model,
            messages=[{"role": "user", "content": request.message}],
            temperature=request.temperature
        )
        return response.choices[0].message.content
    except Exception as e:
        return f"DeepSeek API call failed: {str(e)}"

if __name__ == "__main__":
    mcp.run(server, transport='stdio')
```
Prompt templates let the server enrich raw user input before it reaches the model. A simple implementation built on string.Template:

```python
# prompt_templates.py
from string import Template
from datetime import datetime

class PromptTemplate:
    def __init__(self, template_str: str):
        self.template = Template(template_str)

    def render(self, **kwargs) -> str:
        """Render the template."""
        # Supply default context values
        defaults = {
            'current_time': datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
            'system_role': "You are a helpful AI assistant"
        }
        defaults.update(kwargs)
        return self.template.safe_substitute(defaults)

# Templates for different scenarios
TEMPLATES = {
    "code_assistant": PromptTemplate("""$system_role
Current time: $current_time

Please help me solve the following programming problem:
$user_query

Provide detailed code examples and explanations."""),

    "content_writer": PromptTemplate("""$system_role
Current time: $current_time

Please write content according to these requirements:
Topic: $topic
Word count: $word_count
Style: $style

Begin writing:"""),

    "data_analyzer": PromptTemplate("""$system_role
Current time: $current_time

Please analyze the following data:
Dataset description: $dataset_description
Analysis goal: $analysis_goal

Provide detailed analysis results:""")
}
```
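To check that a template renders as expected, a quick usage sketch (the query string is illustrative):

```python
# render_demo.py - quick check of template rendering; the query is
# an illustrative placeholder.
from prompt_templates import TEMPLATES

prompt = TEMPLATES["code_assistant"].render(
    user_query="How do I implement quicksort?"
)
# system_role and current_time are filled from the defaults
print(prompt)
```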
Dynamic context injection builds on this: a ContextManager accumulates context and merges it into the template at render time:

```python
# context_manager.py
from typing import Any
from prompt_templates import TEMPLATES

class ContextManager:
    def __init__(self):
        self.context_stores = {}

    def add_context(self, key: str, context: Any):
        """Add a piece of context."""
        self.context_stores[key] = context

    def get_context(self, key: str, default=None):
        """Retrieve a piece of context."""
        return self.context_stores.get(key, default)

    def generate_prompt(self, template_name: str, user_input: str,
                        **extra_context) -> str:
        """Generate the final prompt."""
        if template_name not in TEMPLATES:
            raise ValueError(f"Unknown template: {template_name}")

        # Merge all context sources; keys without a matching placeholder
        # are simply ignored by safe_substitute()
        context = {
            'user_query': user_input,
            **self.context_stores,
            **extra_context
        }
        return TEMPLATES[template_name].render(**context)

# Usage example
context_manager = ContextManager()
context_manager.add_context("user_level", "advanced")
context_manager.add_context("preferred_language", "Python")

prompt = context_manager.generate_prompt(
    "code_assistant",
    "How do I implement a quicksort algorithm?",
    complexity="high"
)
```
For multi-turn use, a conversation manager keeps a rolling history that can be injected into prompts:

```python
# conversation_manager.py
from dataclasses import dataclass
from datetime import datetime
from typing import List

from prompt_templates import TEMPLATES

@dataclass
class Message:
    role: str  # "user", "assistant", "system"
    content: str
    timestamp: str

class ConversationManager:
    def __init__(self, max_history: int = 10):
        self.history: List[Message] = []
        self.max_history = max_history

    def add_message(self, role: str, content: str):
        """Append a message to the history."""
        message = Message(
            role=role,
            content=content,
            timestamp=datetime.now().isoformat()
        )
        self.history.append(message)
        # Cap the history length
        if len(self.history) > self.max_history:
            self.history = self.history[-self.max_history:]

    def get_conversation_context(self) -> str:
        """Render the conversation history as text."""
        context_lines = []
        for msg in self.history:
            context_lines.append(f"{msg.role}: {msg.content}")
        return "\n".join(context_lines)

    def generate_contextual_prompt(self, user_input: str,
                                   template_name: str) -> str:
        """Generate a prompt that includes the conversation context."""
        conversation_context = self.get_conversation_context()
        prompt = TEMPLATES[template_name].render(
            user_query=user_input,
            conversation_history=conversation_context,
            current_time=datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        )
        return prompt
```
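A short usage sketch of a multi-turn exchange (the messages are illustrative):

```python
# conversation_demo.py - illustrative multi-turn usage
from conversation_manager import ConversationManager

cm = ConversationManager(max_history=10)
cm.add_message("user", "How do I implement quicksort?")
cm.add_message("assistant", "Pick a pivot, partition, then recurse on both halves.")

# The rendered prompt can now carry the prior exchange
prompt = cm.generate_contextual_prompt(
    "What is its worst-case complexity?", "code_assistant"
)
print(prompt)
```

Note that safe_substitute() silently drops keyword arguments with no matching placeholder, so the history only appears in templates that actually declare a $conversation_history field.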
Putting it together, a single server can route requests to any backend. (OllamaIntegration, OpenAIIntegration, and PromptSystem are assumed wrappers around the code shown in the earlier sections.)

```python
# comprehensive_mcp_server.py
import mcp.server as mcp
from mcp.server import Server
from pydantic import BaseModel
from typing import Optional

# Import the individual integration modules
from ollama_integration import OllamaIntegration
from openai_integration import OpenAIIntegration
from prompt_system import PromptSystem

server = Server("comprehensive-llm-server")

class LLMRequest(BaseModel):
    prompt: str
    model_type: str = "ollama"  # ollama, openai, deepseek
    model_name: Optional[str] = None
    max_tokens: int = 512
    temperature: float = 0.7

# Initialize the integration modules
ollama_integration = OllamaIntegration()
openai_integration = OpenAIIntegration()
prompt_system = PromptSystem()

@server.tool()
async def generate_text(request: LLMRequest) -> str:
    """Unified text-generation entry point."""
    # Enhance the user input through the prompt system
    enhanced_prompt = prompt_system.enhance_prompt(
        request.prompt,
        context=prompt_system.get_current_context()
    )

    # Route to a backend based on the model type
    if request.model_type == "ollama":
        result = await ollama_integration.generate(
            enhanced_prompt, request.model_name, request.max_tokens
        )
    elif request.model_type == "openai":
        result = await openai_integration.chat_completion(
            enhanced_prompt, request.model_name, request.temperature
        )
    else:
        return "Unsupported model type"

    # Record the exchange in the conversation history
    prompt_system.add_to_history("user", request.prompt)
    prompt_system.add_to_history("assistant", result)
    return result

@server.list_resources()
async def list_all_models() -> list:
    """List all available models."""
    ollama_models = await ollama_integration.list_models()
    openai_models = openai_integration.list_models()
    return ollama_models + openai_models

if __name__ == "__main__":
    mcp.run(server, transport='stdio')
```
A client can then drive the combined server:

```python
# client_example.py
import asyncio

from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client

async def main():
    # Describe how to launch the MCP server process
    server_params = StdioServerParameters(
        command="python",
        args=["comprehensive_mcp_server.py"]
    )

    # Connect to the MCP server over stdio
    async with stdio_client(server_params) as (read, write):
        async with ClientSession(read, write) as session:
            # Initialize the session
            await session.initialize()

            # List the available resources
            resources = await session.list_resources()
            print("Available models:", resources)

            # Generate text with the Ollama backend
            response = await session.call_tool(
                "generate_text",
                {
                    "prompt": "Explain the basic concepts of machine learning",
                    "model_type": "ollama",
                    "model_name": "llama2",
                    "max_tokens": 300
                }
            )
            print("Generated response:", response)

if __name__ == "__main__":
    asyncio.run(main())
```
This article walked through deep integration of MCP with large language models, covering integration of both local models (Ollama/vLLM) and online models (OpenAI/DeepSeek), along with the more advanced techniques of prompt template design and dynamic context injection.

With MCP, we can build more modular and extensible AI systems that switch between and combine different models seamlessly. This architecture not only improves flexibility but also lays a solid foundation for future extension.

We hope this tutorial helps you integrate MCP with LLMs in your own projects and build more powerful, intelligent AI applications.