更改了页面布局,调通了模型调用界面

This commit is contained in:
2026-01-13 12:09:41 +08:00
parent a3a19630b7
commit e1ab76a9a1
28 changed files with 6206 additions and 220 deletions

View File

@@ -51,7 +51,16 @@
"Bash(git add:*)",
"Bash(git rm:*)",
"Bash(git check-ignore:*)",
"Bash(./setup.sh:*)"
"Bash(./setup.sh:*)",
"Bash(ss:*)",
"Bash(bash:*)",
"Bash(/d/Softwares/npm/npm install)",
"Bash(npm config:*)",
"Bash(cmd:*)",
"Bash(grep:*)",
"Bash(tasklist:*)",
"Bash(./xrequest/Scripts/python.exe:*)",
"Bash(xrequest/Scripts/python.exe:*)"
]
}
}

122
models/models.json Normal file
View File

@@ -0,0 +1,122 @@
[
{
"id": "model-1",
"name": "GPT-4 Turbo",
"type": "gpt",
"provider": "openai",
"version": "gpt-4-turbo",
"apiUrl": "https://api.openai.com/v1",
"apiKey": "sk-test123456789",
"timeout": 30,
"maxRetries": 3,
"temperature": 0.7,
"topP": 1.0,
"topK": 50,
"maxTokens": 2048,
"systemPrompt": "你是一个有用的AI助手。",
"streaming": true,
"functions": false,
"logRequests": true,
"status": "连接失败"
},
{
"id": "model-2",
"name": "Claude-3 Sonnet",
"type": "claude",
"provider": "anthropic",
"version": "claude-3-sonnet-20240229",
"apiUrl": "https://api.anthropic.com/v1",
"apiKey": "",
"timeout": 30,
"maxRetries": 3,
"temperature": 0.5,
"topP": 0.9,
"topK": 40,
"maxTokens": 4096,
"systemPrompt": "你是一个有用的AI助手。",
"streaming": true,
"functions": true,
"logRequests": true,
"status": "未测试"
},
{
"id": "model-3",
"name": "LLaMA-2 7B",
"type": "llama",
"provider": "local",
"version": "llama-2-7b-chat",
"apiUrl": "http://localhost:8080/v1",
"apiKey": "",
"timeout": 60,
"maxRetries": 3,
"temperature": 0.8,
"topP": 0.95,
"topK": 60,
"maxTokens": 2048,
"systemPrompt": "你是一个有用的AI助手。",
"streaming": true,
"functions": false,
"logRequests": false,
"status": "已测试"
},
{
"id": "0c13f76a-6a40-48d2-86d3-2638fd2be652",
"name": "Test Model",
"type": "gpt",
"provider": "openai",
"version": "gpt-3.5-turbo",
"apiUrl": "",
"apiKey": "",
"timeout": 30,
"maxRetries": 3,
"temperature": 0.7,
"topP": 1.0,
"topK": 50,
"maxTokens": 2048,
"systemPrompt": "你是一个有用的AI助手。",
"streaming": true,
"functions": false,
"logRequests": true,
"status": "未测试"
},
{
"id": "9674adec-124c-4641-898b-7f7557e9e412",
"name": "nova",
"type": "gpt",
"provider": "openai",
"version": "nova",
"apiUrl": "http://10.10.10.122:1234/v1",
"apiKey": "123",
"timeout": 30,
"maxRetries": 3,
"temperature": 0.7,
"topP": 1,
"topK": 50,
"maxTokens": 2048,
"systemPrompt": "你是一个有用的AI助手。",
"streaming": true,
"functions": false,
"logRequests": true,
"status": "已测试"
},
{
"id": "e81c21e1-a4ce-4237-ba22-0922b741b9be",
"name": "qwen3-flash",
"type": "custom",
"provider": "openai",
"version": "qwen-flash",
"apiUrl": "https://dashscope.aliyuncs.com/compatible-mode/v1",
"apiKey": "sk-5706307e3e3a4eb09452dbf0bb87fe31",
"timeout": 60,
"maxRetries": 3,
"temperature": 0.7,
"topP": 1,
"topK": 50,
"maxTokens": 2048,
"systemPrompt": "你是一个有用的AI助手。",
"streaming": true,
"functions": false,
"logRequests": true,
"status": "已测试"
}
]

View File

@@ -1 +1 @@
3335
3807

View File

@@ -1,17 +0,0 @@
# 核心依赖
fastapi==0.104.1
uvicorn[standard]==0.24.0
pydantic==2.5.0
pydantic-settings==2.1.0
psutil==5.9.8
# 日志相关
structlog==23.2.0
colorama==0.4.6
python-json-logger==2.0.7
# 异步文件操作
aiofiles==23.2.1
# 表单/文件上传(FastAPI UploadFile/Form 需要)
python-multipart==0.0.9

View File

@@ -30,8 +30,8 @@ class BaseAPI(ABC):
def __init__(self):
"""初始化基类"""
# 获取模块名作为路由前缀
self.module_name = self.__class__.__module__.split('.')[-1]
# 获取模块名作为路由前缀(允许子类覆盖)
self.module_name = getattr(self, '_override_module_name', None) or self.__class__.__module__.split('.')[-1]
self.router_prefix = f"/{self.module_name}"
# 创建路由器

View File

@@ -32,36 +32,14 @@ def format_file_size(size_bytes: int) -> str:
class DatasetAPI(BaseAPI):
"""数据集管理 API - 自动注册到 /api/datasets 路径"""
"""数据集管理 API"""
def __init__(self):
# 重写初始化逻辑以设置正确的路由前缀
# 1. 手动设置 module_name
self.module_name = "api.datasets"
# 2. 创建路由器(使用期望的前缀)
from fastapi import APIRouter
self.router_prefix = "/api/datasets"
self.router = APIRouter(
prefix=self.router_prefix,
tags=["Datasets"]
)
# 3. 获取日志器
import sys
self.logger = __import__('src.utils.logger', fromlist=['get_logger']).get_logger(self.__class__.__module__)
# 4. 调用基类的自动注册此时router已被覆盖
# 注意我们不调用父类__init__而是手动调用_auto_register_routes
self._auto_register_routes()
# 5. 记录初始化
self.logger.info(
f"API模块初始化完成",
module=self.module_name,
prefix=self.router_prefix,
routes=len(self.router.routes)
)
"""初始化"""
# 在调用super().__init__()之前设置module_name
self._override_module_name = "datasets"
super().__init__()
self.logger.info("DatasetAPI 初始化完成")
@post("/upload", response_model=StandardResponse)
async def upload_dataset(
@@ -143,6 +121,12 @@ class DatasetAPI(BaseAPI):
Returns:
StandardResponse: 包含数据集列表的标准响应
"""
# 添加调试日志
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
logger.info(f"list_datasets called with list_all={list_all}")
try:
if list_all:
# 列出data目录下的所有文件物理文件

View File

@@ -0,0 +1,892 @@
"""
模型配置管理模块
支持模型的增删改查操作
"""
import os
import json
import uuid
import httpx
import asyncio
from typing import List, Dict, Any, Optional
from fastapi import HTTPException, Body, Response
from fastapi.responses import StreamingResponse
import logging
# 导入基类
from src.api.internal.base import BaseAPI, get, post, put, delete
# 配置日志
logger = logging.getLogger(__name__)
# 模型配置文件路径
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
MODELS_CONFIG_PATH = os.path.join(BASE_DIR, "..", "..", "models", "models.json")
# 规范化路径
MODELS_CONFIG_PATH = os.path.normpath(MODELS_CONFIG_PATH)
async def handle_streaming_response(response: httpx.Response, model: Dict[str, Any], attempt: int, max_attempts: int) -> Dict[str, Any]:
    """Consume an SSE streaming chat-completion response and aggregate it.

    Args:
        response: The httpx response streaming OpenAI-compatible SSE lines.
        model: The model configuration dict (used for name/version in the result).
        attempt: 1-based number of the current attempt (logging only).
        max_attempts: Total attempts allowed (logging only).

    Returns:
        A dict with 'success' plus either the aggregated content/usage or
        error details. Never raises; failures are reported in the dict.
    """
    try:
        if response.status_code != 200:
            error_msg = f"API调用失败 (状态码: {response.status_code})"
            # Prefer the structured error body; fall back to raw text.
            try:
                error_detail = response.json()
                error_msg += f": {error_detail}"
            except Exception:  # FIX: was a bare `except:`; body may not be JSON
                error_msg += f": {response.text}"
            return {
                'success': False,
                'error': error_msg,
                'status_code': response.status_code
            }
        full_content = ""
        chunk_count = 0
        usage = {}
        # Each SSE line looks like "data: {...}"; "[DONE]" terminates the stream.
        async for line in response.aiter_lines():
            if line.startswith('data: '):
                data_str = line[6:]  # strip the 'data: ' prefix
                if data_str.strip() == '[DONE]':
                    break
                try:
                    chunk = json.loads(data_str)
                    chunk_count += 1
                    # Accumulate the incremental delta content.
                    if 'choices' in chunk and len(chunk['choices']) > 0:
                        delta = chunk['choices'][0].get('delta', {})
                        if 'content' in delta:
                            full_content += delta['content']
                    # Some providers attach usage stats to the final chunk.
                    if 'usage' in chunk:
                        usage = chunk['usage']
                except json.JSONDecodeError:
                    # Skip keep-alive / malformed fragments.
                    continue
        logger.info(f"流式响应完成 (尝试 {attempt}/{max_attempts}) - 接收 {chunk_count} 个数据块")
        return {
            'success': True,
            'model': model['name'],
            'content': full_content,
            'usage': usage,
            'streaming': True,
            'chunks_received': chunk_count,
            # Synthesize an OpenAI-style non-streaming payload so callers that
            # expect the standard chat.completion shape keep working.
            'raw_response': {
                'object': 'chat.completion',
                'model': model.get('version', 'unknown'),
                'choices': [{
                    'message': {'role': 'assistant', 'content': full_content},
                    'finish_reason': 'stop'
                }],
                'usage': usage
            }
        }
    except Exception as e:
        logger.error(f"流式响应处理错误: {str(e)}")
        return {
            'success': False,
            'error': f'流式响应处理失败: {str(e)}',
            'error_type': type(e).__name__
        }
class ModelManager:
    """Manager for model configurations persisted in ``models.json``.

    All methods are static; the class is a namespace for load/save CRUD
    helpers plus real API invocation (``call_model``) and a synchronous
    connection test (``test_model``).
    """

    @staticmethod
    def load_models() -> List[Dict[str, Any]]:
        """Load all model configurations from the JSON file.

        Returns:
            The list of model dicts, or an empty list when the file is
            missing or unreadable (errors are logged, not raised).
        """
        try:
            if not os.path.exists(MODELS_CONFIG_PATH):
                logger.warning(f"模型配置文件不存在: {MODELS_CONFIG_PATH}")
                return []
            with open(MODELS_CONFIG_PATH, 'r', encoding='utf-8') as f:
                models = json.load(f)
            logger.info(f"成功加载 {len(models)} 个模型配置")
            return models
        except Exception as e:
            logger.error(f"加载模型配置失败: {str(e)}")
            return []

    @staticmethod
    def save_models(models: List[Dict[str, Any]]) -> bool:
        """Persist the full model list to the JSON file.

        Returns:
            True on success, False on any I/O or serialization error.
        """
        try:
            # Make sure the parent directory exists before writing.
            os.makedirs(os.path.dirname(MODELS_CONFIG_PATH), exist_ok=True)
            with open(MODELS_CONFIG_PATH, 'w', encoding='utf-8') as f:
                json.dump(models, f, ensure_ascii=False, indent=2)
            logger.info(f"成功保存 {len(models)} 个模型配置")
            return True
        except Exception as e:
            logger.error(f"保存模型配置失败: {str(e)}")
            return False

    @staticmethod
    def get_model_by_id(model_id: str) -> Optional[Dict[str, Any]]:
        """Return the model dict with the given id, or None if absent."""
        for model in ModelManager.load_models():
            if model.get('id') == model_id:
                return model
        return None

    @staticmethod
    def create_model(model_data: Dict[str, Any]) -> Dict[str, Any]:
        """Create a new model configuration with a generated UUID.

        Raises:
            HTTPException: 500 when the updated list cannot be saved.
        """
        models = ModelManager.load_models()
        model_id = str(uuid.uuid4())
        new_model = {
            'id': model_id,
            'name': model_data.get('name', ''),
            'type': model_data.get('type', ''),
            'provider': model_data.get('provider', ''),
            'version': model_data.get('version', ''),
            'apiUrl': model_data.get('apiUrl', ''),
            'apiKey': model_data.get('apiKey', ''),
            'timeout': model_data.get('timeout', 30),
            'maxRetries': model_data.get('maxRetries', 3),
            'temperature': model_data.get('temperature', 0.7),
            'topP': model_data.get('topP', 1.0),
            'topK': model_data.get('topK', 50),
            'maxTokens': model_data.get('maxTokens', 2048),
            'systemPrompt': model_data.get('systemPrompt', '你是一个有用的AI助手。'),
            'streaming': model_data.get('streaming', True),
            'functions': model_data.get('functions', False),
            'logRequests': model_data.get('logRequests', True),
            'status': model_data.get('status', '未测试')
        }
        models.append(new_model)
        if ModelManager.save_models(models):
            return new_model
        raise HTTPException(status_code=500, detail="保存模型配置失败")

    @staticmethod
    def update_model(model_id: str, model_data: Dict[str, Any]) -> Optional[Dict[str, Any]]:
        """Merge ``model_data`` into the stored model with the given id.

        Returns:
            The updated model dict, or None when the id is unknown.

        Raises:
            HTTPException: 500 when the updated list cannot be saved.
        """
        models = ModelManager.load_models()
        for i, model in enumerate(models):
            if model.get('id') == model_id:
                updated_model = models[i].copy()
                updated_model.update(model_data)
                updated_model['id'] = model_id  # the id is immutable
                models[i] = updated_model
                if ModelManager.save_models(models):
                    return updated_model
                raise HTTPException(status_code=500, detail="保存模型配置失败")
        return None

    @staticmethod
    def delete_model(model_id: str) -> bool:
        """Delete the model with the given id.

        Returns:
            True when found and deleted, False when the id is unknown.

        Raises:
            HTTPException: 500 when the updated list cannot be saved.
        """
        models = ModelManager.load_models()
        for i, model in enumerate(models):
            if model.get('id') == model_id:
                del models[i]
                if ModelManager.save_models(models):
                    return True
                raise HTTPException(status_code=500, detail="保存模型配置失败")
        return False

    @staticmethod
    async def call_model(model_id: str, prompt: str, system_prompt: Optional[str] = None) -> Dict[str, Any]:
        """Call the real provider API for a single chat completion.

        Supports Anthropic's Messages API and OpenAI-compatible
        ``/chat/completions`` endpoints, with retry on timeout / network
        errors and optional streaming (delegated to
        ``handle_streaming_response``).

        Args:
            model_id: Id of a stored model configuration.
            prompt: The user message.
            system_prompt: Optional system prompt overriding the stored one.

        Returns:
            A result dict with 'success' plus content/usage or error details.

        Raises:
            HTTPException: 404 for an unknown model, 400 for missing
            apiUrl/version configuration.
        """
        model = ModelManager.get_model_by_id(model_id)
        if not model:
            raise HTTPException(status_code=404, detail="模型不存在")
        # Pull and validate the connection settings.
        api_url = model.get('apiUrl', '').strip()
        api_key = model.get('apiKey', '').strip()
        version = model.get('version', '').strip()
        provider = model.get('provider', '').strip()
        # FIX: compute this once, unconditionally. It was previously assigned
        # only inside the `if api_key:` branch, so key-less models (e.g. local
        # deployments with an empty apiKey) crashed with NameError below.
        provider_lower = provider.lower()
        logger.info(f"Model {model.get('name')} config", extra={
            'provider': provider,
            'api_url': api_url,
            'version': version,
            'has_api_key': bool(api_key),
            'api_key_length': len(api_key) if api_key else 0
        })
        if not api_url:
            raise HTTPException(status_code=400, detail="模型API地址未配置")
        if not version:
            raise HTTPException(status_code=400, detail="模型版本未配置")
        # OpenAI-style request payload (also the basis for Anthropic below).
        request_data = {
            "model": version,
            "messages": [],
            "temperature": model.get('temperature', 0.7),
            "top_p": model.get('topP', 1.0),
            "max_tokens": model.get('maxTokens', 2048),
            "stream": model.get('streaming', False)
        }
        # System prompt: the explicit argument wins over the stored default.
        if system_prompt:
            request_data["messages"].append({
                "role": "system",
                "content": system_prompt
            })
        elif model.get('systemPrompt'):
            request_data["messages"].append({
                "role": "system",
                "content": model.get('systemPrompt')
            })
        request_data["messages"].append({
            "role": "user",
            "content": prompt
        })
        headers = {
            "Content-Type": "application/json"
        }
        if api_key:
            if provider_lower == 'anthropic':
                # Anthropic uses x-api-key plus a pinned API version header.
                headers["x-api-key"] = api_key
                headers["anthropic-version"] = "2023-06-01"
                logger.info(f"设置Anthropic认证头x-api-key长度: {len(api_key)}")
            else:
                # OpenAI and every OpenAI-compatible provider use Bearer auth.
                headers["Authorization"] = f"Bearer {api_key}"
                logger.info(f"设置Bearer认证头key长度: {len(api_key)}")
            # Log the final headers with the secret masked.
            logger.info(f"最终请求头", extra={
                'headers': {k: v if k != 'Authorization' and k != 'x-api-key' else '***HIDDEN***' for k, v in headers.items()},
                'api_key_masked': f"{api_key[:4]}...{api_key[-4:]}" if len(api_key) > 8 else '***'
            })
        else:
            logger.warning(f"未提供API key for model {model.get('name')}")
            logger.info(f"最终请求头", extra={
                'headers': headers,
                'api_key': 'NONE'
            })
        max_retries = model.get('maxRetries', 3)
        retry_delay = 2  # seconds between retries
        last_error = None
        # One initial try plus up to max_retries retries.
        for attempt in range(max_retries + 1):
            try:
                timeout = httpx.Timeout(model.get('timeout', 30))
                async with httpx.AsyncClient(timeout=timeout) as client:
                    if provider_lower == 'anthropic':
                        # Anthropic: the system prompt is a top-level field,
                        # not a chat message.
                        anthropic_request = {
                            "model": version,
                            "max_tokens": request_data["max_tokens"],
                            # FIX: filter by role instead of slicing [1:], which
                            # dropped the user message when no system prompt was set.
                            "messages": [m for m in request_data["messages"] if m["role"] != "system"]
                        }
                        if system_prompt or model.get('systemPrompt'):
                            anthropic_request["system"] = system_prompt or model.get('systemPrompt')
                        logger.info(f"发送Anthropic API请求 (尝试 {attempt + 1}/{max_retries + 1})", extra={
                            'url': f"{api_url.rstrip('/')}/messages",
                            'headers': {k: v if k != 'x-api-key' else '***HIDDEN***' for k, v in headers.items()},
                            'request_body': anthropic_request
                        })
                        response = await client.post(
                            f"{api_url.rstrip('/')}/messages",
                            headers=headers,
                            json=anthropic_request
                        )
                    else:
                        logger.info(f"发送OpenAI兼容API请求 (尝试 {attempt + 1}/{max_retries + 1})", extra={
                            'url': f"{api_url.rstrip('/')}/chat/completions",
                            'headers': {k: v if k != 'Authorization' else '***HIDDEN***' for k, v in headers.items()},
                            'request_body': request_data
                        })
                        response = await client.post(
                            f"{api_url.rstrip('/')}/chat/completions",
                            headers=headers,
                            json=request_data
                        )
                    # Streaming responses are aggregated by a dedicated helper.
                    if request_data.get('stream', False):
                        return await handle_streaming_response(response, model, attempt + 1, max_retries + 1)
                    if response.status_code == 200:
                        result = response.json()
                        if provider_lower == 'anthropic' and "content" in result and len(result["content"]) > 0:
                            # Anthropic response shape.
                            content = result["content"][0]["text"]
                            logger.info(f"API调用成功 (尝试 {attempt + 1})")
                            return {
                                'success': True,
                                'model': model['name'],
                                'content': content,
                                'usage': result.get('usage', {}),
                                'raw_response': result
                            }
                        elif "choices" in result and len(result["choices"]) > 0:
                            # OpenAI-compatible response shape.
                            content = result["choices"][0]["message"]["content"]
                            logger.info(f"API调用成功 (尝试 {attempt + 1})")
                            return {
                                'success': True,
                                'model': model['name'],
                                'content': content,
                                'usage': result.get('usage', {}),
                                'raw_response': result
                            }
                        return {
                            'success': False,
                            'error': '无法解析API响应',
                            'raw_response': result
                        }
                    else:
                        error_msg = f"API调用失败 (状态码: {response.status_code})"
                        try:
                            error_detail = response.json()
                            error_msg += f": {error_detail}"
                        except Exception:  # FIX: was a bare `except:`
                            error_msg += f": {response.text}"
                        last_error = {
                            'success': False,
                            'error': error_msg,
                            'status_code': response.status_code
                        }
                        if attempt < max_retries:
                            logger.warning(f"API调用失败{retry_delay}秒后重试 (尝试 {attempt + 1}/{max_retries + 1}): {error_msg}")
                            await asyncio.sleep(retry_delay)
                            continue
                        return last_error
            except httpx.TimeoutException:
                last_error = {
                    'success': False,
                    'error': 'API调用超时',
                    'timeout': model.get('timeout', 30)
                }
                logger.warning(f"API调用超时 (尝试 {attempt + 1}/{max_retries + 1})")
                if attempt < max_retries:
                    await asyncio.sleep(retry_delay)
                    continue
                return last_error
            except httpx.RequestError as e:
                last_error = {
                    'success': False,
                    'error': f'网络请求错误: {str(e)}'
                }
                logger.warning(f"网络请求错误 (尝试 {attempt + 1}/{max_retries + 1}): {str(e)}")
                if attempt < max_retries:
                    await asyncio.sleep(retry_delay)
                    continue
                return last_error
            except Exception as e:
                # Unknown errors are not retried.
                last_error = {
                    'success': False,
                    'error': f'调用失败: {str(e)}',
                    'error_type': type(e).__name__
                }
                logger.error(f"未知错误 (尝试 {attempt + 1}/{max_retries + 1}): {str(e)}")
                return last_error
        # All retries exhausted: surface the last recorded error.
        return last_error

    @staticmethod
    def test_model(model_id: str) -> Dict[str, Any]:
        """Synchronously test a model connection with a real API call.

        Runs ``call_model`` via ``asyncio.run`` in a fresh thread so this is
        safe to call from contexts that already own an event loop, then
        persists the resulting status ('已测试' / '连接失败').
        """
        import threading
        result_holder = {}

        def run_test():
            """Run the async call in this worker thread's own event loop."""
            try:
                test_result = asyncio.run(
                    ModelManager.call_model(
                        model_id=model_id,
                        prompt="你好,请回复'测试成功'"
                    )
                )
                result_holder['success'] = True
                result_holder['result'] = test_result
            except Exception as e:
                result_holder['success'] = False
                result_holder['error'] = str(e)

        thread = threading.Thread(target=run_test)
        thread.start()
        thread.join()
        if not result_holder.get('success'):
            error = result_holder.get('error', '未知错误')
            logger.error(f"测试模型连接时发生错误: {error}")
            # Mark the model as failed if it still exists.
            model = ModelManager.get_model_by_id(model_id)
            if model:
                ModelManager.update_model(model_id, {'status': '连接失败'})
            return {
                'model_id': model_id,
                'success': False,
                'status': '连接失败',
                'message': f'测试过程出错: {error}',
                'error': error
            }
        test_result = result_holder.get('result', {})
        success = test_result.get('success', False)
        model = ModelManager.get_model_by_id(model_id)
        # FIX: `message` used to be unbound when the model vanished between
        # the call and this lookup; give it a safe default.
        message = '模型不存在'
        if model:
            if success:
                model['status'] = '已测试'
                message = '连接测试成功'
            else:
                model['status'] = '连接失败'
                message = f'连接测试失败: {test_result.get("error", "未知错误")}'
            ModelManager.update_model(model_id, {'status': model['status']})
        return {
            'model_id': model_id,
            'success': success,
            'status': model['status'] if model else '未知',
            'message': message,
            'test_result': test_result
        }
class ModelsAPI(BaseAPI):
    """REST API for model configuration management (auto-registered routes)."""

    def __init__(self):
        """Initialize the base router/logger and log readiness."""
        super().__init__()
        self.logger.info("ModelsAPI 初始化完成")

    @get("/")
    async def get_models(self):
        """Return every stored model configuration with a total count."""
        try:
            models = ModelManager.load_models()
            return self.success({
                'models': models,
                'total': len(models)
            }, "获取模型配置成功")
        except Exception as e:
            self.logger.error(f"获取模型配置失败: {str(e)}")
            raise HTTPException(status_code=500, detail=str(e))

    @get("/{model_id}")
    async def get_model(self, model_id: str):
        """Return one model configuration by id (404 when unknown)."""
        try:
            model = ModelManager.get_model_by_id(model_id)
            if not model:
                raise HTTPException(status_code=404, detail="模型不存在")
            return self.success(model, "获取模型配置成功")
        except HTTPException:
            raise
        except Exception as e:
            self.logger.error(f"获取模型配置失败: {str(e)}")
            raise HTTPException(status_code=500, detail=str(e))

    @post("/")
    async def create_model(self, model_data: Dict[str, Any] = Body(...)):
        """Create a new model configuration; 'name' is required (400 otherwise)."""
        try:
            if not model_data.get('name'):
                raise HTTPException(status_code=400, detail="模型名称不能为空")
            new_model = ModelManager.create_model(model_data)
            return self.success(new_model, "创建模型配置成功")
        except HTTPException:
            raise
        except Exception as e:
            self.logger.error(f"创建模型配置失败: {str(e)}")
            raise HTTPException(status_code=500, detail=str(e))

    @put("/{model_id}")
    async def update_model(self, model_id: str, model_data: Dict[str, Any] = Body(...)):
        """Update an existing model configuration (404 when unknown)."""
        try:
            existing_model = ModelManager.get_model_by_id(model_id)
            if not existing_model:
                raise HTTPException(status_code=404, detail="模型不存在")
            updated_model = ModelManager.update_model(model_id, model_data)
            return self.success(updated_model, "更新模型配置成功")
        except HTTPException:
            raise
        except Exception as e:
            self.logger.error(f"更新模型配置失败: {str(e)}")
            raise HTTPException(status_code=500, detail=str(e))

    @delete("/{model_id}")
    async def delete_model(self, model_id: str):
        """Delete a model configuration (404 when unknown)."""
        try:
            existing_model = ModelManager.get_model_by_id(model_id)
            if not existing_model:
                raise HTTPException(status_code=404, detail="模型不存在")
            if ModelManager.delete_model(model_id):
                return self.success(None, "删除模型配置成功")
            raise HTTPException(status_code=500, detail="删除模型配置失败")
        except HTTPException:
            raise
        except Exception as e:
            self.logger.error(f"删除模型配置失败: {str(e)}")
            raise HTTPException(status_code=500, detail=str(e))

    @post("/{model_id}/test")
    async def test_model_connection(self, model_id: str):
        """Test a model connection in the current event loop and persist status."""
        try:
            # Await the real call directly; no worker thread needed here.
            result = await ModelManager.call_model(
                model_id=model_id,
                prompt="你好,请回复'测试成功'"
            )
            model = ModelManager.get_model_by_id(model_id)
            # FIX: `message` used to be unbound when `model` was falsy.
            message = '模型不存在'
            if model:
                if result.get('success'):
                    model['status'] = '已测试'
                    message = '连接测试成功'
                else:
                    model['status'] = '连接失败'
                    message = f'连接测试失败: {result.get("error", "未知错误")}'
                ModelManager.update_model(model_id, {'status': model['status']})
            test_result = {
                'model_id': model_id,
                'success': result.get('success', False),
                'status': model['status'] if model else '未知',
                'message': message,
                'test_result': result
            }
            return self.success(test_result, message)
        except HTTPException:
            raise
        except Exception as e:
            self.logger.error(f"测试模型连接失败: {str(e)}")
            raise HTTPException(status_code=500, detail=str(e))

    @post("/{model_id}/call")
    async def call_model_api(self, model_id: str, request_data: Dict[str, Any] = Body(...)):
        """Chat with a model; pass ``"stream": true`` for SSE-style streaming."""
        try:
            prompt = request_data.get('prompt', '')
            system_prompt = request_data.get('systemPrompt', None)
            stream_enabled = request_data.get('stream', False)
            if not prompt:
                raise HTTPException(status_code=400, detail="提示词不能为空")
            if stream_enabled:
                # Stream chunks straight through to the client as they arrive.
                return StreamingResponse(
                    self._stream_data(model_id, prompt, system_prompt),
                    media_type="text/plain",
                    headers={
                        "Cache-Control": "no-cache",
                        "Connection": "keep-alive",
                        "Transfer-Encoding": "chunked"
                    }
                )
            # Plain (non-streaming) invocation.
            result = await ModelManager.call_model(
                model_id=model_id,
                prompt=prompt,
                system_prompt=system_prompt
            )
            return self.success(result, "模型调用成功" if result.get('success') else "模型调用失败")
        except HTTPException:
            raise
        except Exception as e:
            self.logger.error(f"调用模型失败: {str(e)}")
            raise HTTPException(status_code=500, detail=str(e))

    async def _stream_data(self, model_id: str, prompt: str, system_prompt: Optional[str] = None):
        """Async generator yielding SSE ``data:`` lines relayed from the upstream model."""
        try:
            model = ModelManager.get_model_by_id(model_id)
            if not model:
                yield "data: " + json.dumps({"error": "模型不存在", "done": True}) + "\n\n"
                return
            api_url = model.get('apiUrl', '').strip()
            api_key = model.get('apiKey', '').strip()
            version = model.get('version', '').strip()
            provider = model.get('provider', '').strip()
            request_data = {
                "model": version,
                "messages": [],
                "temperature": model.get('temperature', 0.7),
                "top_p": model.get('topP', 1.0),
                "max_tokens": model.get('maxTokens', 2048),
                "stream": True  # this path always streams
            }
            # Explicit system prompt wins over the stored default.
            if system_prompt:
                request_data["messages"].append({
                    "role": "system",
                    "content": system_prompt
                })
            elif model.get('systemPrompt'):
                request_data["messages"].append({
                    "role": "system",
                    "content": model.get('systemPrompt')
                })
            request_data["messages"].append({
                "role": "user",
                "content": prompt
            })
            headers = {
                "Content-Type": "application/json"
            }
            if api_key:
                if provider.lower() == 'anthropic':
                    headers["x-api-key"] = api_key
                    headers["anthropic-version"] = "2023-06-01"
                else:
                    headers["Authorization"] = f"Bearer {api_key}"
            async with httpx.AsyncClient(timeout=60) as client:
                if provider.lower() == 'anthropic':
                    anthropic_request = {
                        "model": version,
                        "max_tokens": request_data["max_tokens"],
                        # FIX: keep non-system messages instead of slicing [1:],
                        # which dropped the user message when no system prompt existed.
                        "messages": [m for m in request_data["messages"] if m["role"] != "system"],
                        "stream": True
                    }
                    if system_prompt or model.get('systemPrompt'):
                        anthropic_request["system"] = system_prompt or model.get('systemPrompt')
                    async with client.stream(
                        "POST",
                        f"{api_url.rstrip('/')}/messages",
                        headers=headers,
                        json=anthropic_request
                    ) as response:
                        async for line in response.aiter_lines():
                            if line.startswith('data: '):
                                data_str = line[6:]
                                if data_str.strip() == '[DONE]':
                                    yield "data: " + json.dumps({"content": "", "done": True}) + "\n\n"
                                    break
                                try:
                                    chunk_data = json.loads(data_str)
                                    if "content" in chunk_data and len(chunk_data["content"]) > 0:
                                        content = chunk_data["content"][0]["text"]
                                        yield f"data: {json.dumps({'content': content, 'done': False})}\n\n"
                                except Exception:  # FIX: was a bare `except:`
                                    continue
                else:
                    # OpenAI-compatible streaming endpoint.
                    async with client.stream(
                        "POST",
                        f"{api_url.rstrip('/')}/chat/completions",
                        headers=headers,
                        json=request_data
                    ) as response:
                        async for line in response.aiter_lines():
                            if line.startswith('data: '):
                                data_str = line[6:]
                                if data_str.strip() == '[DONE]':
                                    yield "data: " + json.dumps({"content": "", "done": True}) + "\n\n"
                                    break
                                try:
                                    chunk_data = json.loads(data_str)
                                    if "choices" in chunk_data and len(chunk_data["choices"]) > 0:
                                        delta = chunk_data["choices"][0].get("delta", {})
                                        if "content" in delta and delta["content"]:
                                            content = delta["content"]
                                            yield f"data: {json.dumps({'content': content, 'done': False})}\n\n"
                                except Exception:  # FIX: was a bare `except:`
                                    continue
        except Exception as e:
            # Report the failure inside the stream instead of raising mid-response.
            yield "data: " + json.dumps({"error": str(e), "done": True}) + "\n\n"

    @post("/batch-delete")
    async def batch_delete_models(self, model_ids: List[str] = Body(...)):
        """Delete several models, reporting success/failure counts."""
        try:
            deleted_count = 0
            failed_count = 0
            for model_id in model_ids:
                if ModelManager.delete_model(model_id):
                    deleted_count += 1
                else:
                    failed_count += 1
            return self.success({
                'deleted_count': deleted_count,
                'failed_count': failed_count
            }, f'批量删除完成: 成功 {deleted_count} 个, 失败 {failed_count}')
        except Exception as e:
            self.logger.error(f"批量删除模型失败: {str(e)}")
            raise HTTPException(status_code=500, detail=str(e))

    @get("/providers/list")
    async def get_model_providers(self):
        """Return the selectable provider options for the UI."""
        providers = [
            {'value': 'openai', 'label': 'OpenAI'},
            {'value': 'anthropic', 'label': 'Anthropic'},
            {'value': 'azure', 'label': 'Azure OpenAI'},
            {'value': 'local', 'label': '本地部署'},
            {'value': 'custom', 'label': '自定义'}
        ]
        return self.success(providers, "获取提供方列表成功")

    @get("/types/list")
    async def get_model_types(self):
        """Return the selectable model-type options for the UI."""
        types = [
            {'value': 'gpt', 'label': 'GPT 系列'},
            {'value': 'bert', 'label': 'BERT 系列'},
            {'value': 'llama', 'label': 'LLaMA 系列'},
            {'value': 'claude', 'label': 'Claude 系列'},
            {'value': 'custom', 'label': '自定义'}
        ]
        return self.success(types, "获取模型类型列表成功")

    @get("/status/list")
    async def get_model_statuses(self):
        """Return the model-status options with display colors."""
        statuses = [
            {'value': '已测试', 'label': '已测试', 'color': 'green'},
            {'value': '连接失败', 'label': '连接失败', 'color': 'red'},
            {'value': '未测试', 'label': '未测试', 'color': 'gray'}
        ]
        return self.success(statuses, "获取模型状态列表成功")
# Module-level API instance, picked up by the route auto-registration machinery.
models_api = ModelsAPI()

View File

@@ -246,8 +246,19 @@ def setup_routes(app: FastAPI):
registration_result = auto_register_routes(app)
# 挂载静态文件目录(放在最后,避免覆盖其他路由)
app.mount("/", StaticFiles(directory="static", html=True), name="static")
# 挂载静态文件目录
from fastapi.responses import FileResponse
# 为前端页面创建特定路由
@app.get("/dashboard")
async def dashboard():
"""前端监控界面"""
return FileResponse("static/index.html")
# 只挂载静态资源CSS, JS, 图片等)- 如果目录存在
import os
if os.path.exists("static"):
app.mount("/static", StaticFiles(directory="static"), name="static")
# 生成离线API文档
generate_docs(app)

View File

@@ -1 +1 @@
0dc715b603074fca864c0c5074317e6a
0bbf8a1a0807bf0cee9cf3cc5634f318

View File

@@ -689,14 +689,14 @@
}
}
},
"/api/datasets/{file_id}": {
"/datasets/{file_id}": {
"delete": {
"tags": [
"Datasets"
],
"summary": "delete_dataset",
"description": "删除数据集\n\nArgs:\n file_id: 文件ID\n\nReturns:\n StandardResponse: 包含删除结果的标准响应",
"operationId": "delete_dataset_api_datasets__file_id__delete",
"operationId": "delete_dataset_datasets__file_id__delete",
"parameters": [
{
"name": "file_id",
@@ -737,7 +737,7 @@
],
"summary": "get_dataset",
"description": "获取特定数据集的详细信息\n\nArgs:\n file_id: 文件ID\n\nReturns:\n StandardResponse: 包含数据集详情的标准响应",
"operationId": "get_dataset_api_datasets__file_id__get",
"operationId": "get_dataset_datasets__file_id__get",
"parameters": [
{
"name": "file_id",
@@ -773,14 +773,14 @@
}
}
},
"/api/datasets/{file_id}/content": {
"/datasets/{file_id}/content": {
"get": {
"tags": [
"Datasets"
],
"summary": "get_dataset_content",
"description": "获取数据集文件内容前N条记录\n\nArgs:\n file_id: 文件ID\n limit: 返回的记录数量默认5条\n\nReturns:\n StandardResponse: 包含数据集内容的标准响应",
"operationId": "get_dataset_content_api_datasets__file_id__content_get",
"operationId": "get_dataset_content_datasets__file_id__content_get",
"parameters": [
{
"name": "file_id",
@@ -826,14 +826,14 @@
}
}
},
"/api/datasets/list-files": {
"/datasets/list-files": {
"get": {
"tags": [
"Datasets"
],
"summary": "list_data_files",
"description": "查询data目录下的文件列表\n\nReturns:\n StandardResponse: 包含文件列表的标准响应",
"operationId": "list_data_files_api_datasets_list_files_get",
"operationId": "list_data_files_datasets_list_files_get",
"responses": {
"200": {
"description": "Successful Response",
@@ -848,14 +848,14 @@
}
}
},
"/api/datasets": {
"/datasets": {
"get": {
"tags": [
"Datasets"
],
"summary": "list_datasets",
"description": "获取所有数据集列表\n\nArgs:\n list_all: 是否列出data目录下的所有文件物理文件默认False只列出API上传的文件\n\nReturns:\n StandardResponse: 包含数据集列表的标准响应",
"operationId": "list_datasets_api_datasets_get",
"operationId": "list_datasets_datasets_get",
"parameters": [
{
"name": "list_all",
@@ -892,19 +892,19 @@
}
}
},
"/api/datasets/upload": {
"/datasets/upload": {
"post": {
"tags": [
"Datasets"
],
"summary": "upload_dataset",
"description": "上传数据集文件\n\nArgs:\n file: 上传的文件(支持 .json, .jsonl 格式)\n description: 文件描述(可选)\n\nReturns:\n StandardResponse: 包含上传结果的标准响应",
"operationId": "upload_dataset_api_datasets_upload_post",
"operationId": "upload_dataset_datasets_upload_post",
"requestBody": {
"content": {
"multipart/form-data": {
"schema": {
"$ref": "#/components/schemas/Body_upload_dataset_api_datasets_upload_post"
"$ref": "#/components/schemas/Body_upload_dataset_datasets_upload_post"
}
}
},
@@ -933,6 +933,392 @@
}
}
}
},
"/models/batch-delete": {
"post": {
"tags": [
"Models"
],
"summary": "batch_delete_models",
"description": "批量删除模型",
"operationId": "batch_delete_models_models_batch_delete_post",
"requestBody": {
"content": {
"application/json": {
"schema": {
"items": {
"type": "string"
},
"type": "array",
"title": "Model Ids"
}
}
},
"required": true
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {}
}
}
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
}
}
}
}
},
"/models/{model_id}/call": {
"post": {
"tags": [
"Models"
],
"summary": "call_model_api",
"description": "调用模型进行对话(支持流式输出)",
"operationId": "call_model_api_models__model_id__call_post",
"parameters": [
{
"name": "model_id",
"in": "path",
"required": true,
"schema": {
"type": "string",
"title": "Model Id"
}
}
],
"requestBody": {
"required": true,
"content": {
"application/json": {
"schema": {
"type": "object",
"title": "Request Data"
}
}
}
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {}
}
}
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
}
}
}
}
},
"/models/": {
"get": {
"tags": [
"Models"
],
"summary": "get_models",
"description": "获取所有模型配置",
"operationId": "get_models_models__get",
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {}
}
}
}
}
},
"post": {
"tags": [
"Models"
],
"summary": "create_model",
"description": "创建新模型配置",
"operationId": "create_model_models__post",
"requestBody": {
"content": {
"application/json": {
"schema": {
"type": "object",
"title": "Model Data"
}
}
},
"required": true
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {}
}
}
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
}
}
}
}
},
"/models/{model_id}": {
"delete": {
"tags": [
"Models"
],
"summary": "delete_model",
"description": "删除模型配置",
"operationId": "delete_model_models__model_id__delete",
"parameters": [
{
"name": "model_id",
"in": "path",
"required": true,
"schema": {
"type": "string",
"title": "Model Id"
}
}
],
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {}
}
}
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
}
}
}
},
"get": {
"tags": [
"Models"
],
"summary": "get_model",
"description": "获取单个模型配置",
"operationId": "get_model_models__model_id__get",
"parameters": [
{
"name": "model_id",
"in": "path",
"required": true,
"schema": {
"type": "string",
"title": "Model Id"
}
}
],
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {}
}
}
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
}
}
}
},
"put": {
"tags": [
"Models"
],
"summary": "update_model",
"description": "更新模型配置",
"operationId": "update_model_models__model_id__put",
"parameters": [
{
"name": "model_id",
"in": "path",
"required": true,
"schema": {
"type": "string",
"title": "Model Id"
}
}
],
"requestBody": {
"required": true,
"content": {
"application/json": {
"schema": {
"type": "object",
"title": "Model Data"
}
}
}
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {}
}
}
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
}
}
}
}
},
"/models/providers/list": {
"get": {
"tags": [
"Models"
],
"summary": "get_model_providers",
"description": "获取模型提供方列表",
"operationId": "get_model_providers_models_providers_list_get",
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {}
}
}
}
}
}
},
"/models/status/list": {
"get": {
"tags": [
"Models"
],
"summary": "get_model_statuses",
"description": "获取模型状态列表",
"operationId": "get_model_statuses_models_status_list_get",
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {}
}
}
}
}
}
},
"/models/types/list": {
"get": {
"tags": [
"Models"
],
"summary": "get_model_types",
"description": "获取模型类型列表",
"operationId": "get_model_types_models_types_list_get",
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {}
}
}
}
}
}
},
"/models/{model_id}/test": {
"post": {
"tags": [
"Models"
],
"summary": "test_model_connection",
"description": "测试模型连接",
"operationId": "test_model_connection_models__model_id__test_post",
"parameters": [
{
"name": "model_id",
"in": "path",
"required": true,
"schema": {
"type": "string",
"title": "Model Id"
}
}
],
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {}
}
}
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
}
}
}
}
}
},
"components": {
@@ -971,7 +1357,7 @@
"title": "BatchDownloadRequest",
"description": "批量下载请求模型"
},
"Body_upload_dataset_api_datasets_upload_post": {
"Body_upload_dataset_datasets_upload_post": {
"properties": {
"file": {
"type": "string",
@@ -994,7 +1380,7 @@
"required": [
"file"
],
"title": "Body_upload_dataset_api_datasets_upload_post"
"title": "Body_upload_dataset_datasets_upload_post"
},
"HTTPValidationError": {
"properties": {

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,247 @@
<#
.Synopsis
Activate a Python virtual environment for the current PowerShell session.
.Description
Pushes the python executable for a virtual environment to the front of the
$Env:PATH environment variable and sets the prompt to signify that you are
in a Python virtual environment. Makes use of the command line switches as
well as the `pyvenv.cfg` file values present in the virtual environment.
.Parameter VenvDir
Path to the directory that contains the virtual environment to activate. The
default value for this is the parent of the directory that the Activate.ps1
script is located within.
.Parameter Prompt
The prompt prefix to display when this virtual environment is activated. By
default, this prompt is the name of the virtual environment folder (VenvDir)
surrounded by parentheses and followed by a single space (ie. '(.venv) ').
.Example
Activate.ps1
Activates the Python virtual environment that contains the Activate.ps1 script.
.Example
Activate.ps1 -Verbose
Activates the Python virtual environment that contains the Activate.ps1 script,
and shows extra information about the activation as it executes.
.Example
Activate.ps1 -VenvDir C:\Users\MyUser\Common\.venv
Activates the Python virtual environment located in the specified location.
.Example
Activate.ps1 -Prompt "MyPython"
Activates the Python virtual environment that contains the Activate.ps1 script,
and prefixes the current prompt with the specified string (surrounded in
parentheses) while the virtual environment is active.
.Notes
On Windows, it may be required to enable this Activate.ps1 script by setting the
execution policy for the user. You can do this by issuing the following PowerShell
command:
PS C:\> Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser
For more information on Execution Policies:
https://go.microsoft.com/fwlink/?LinkID=135170
#>
Param(
[Parameter(Mandatory = $false)]
[String]
$VenvDir,
[Parameter(Mandatory = $false)]
[String]
$Prompt
)
<# Function declarations --------------------------------------------------- #>
<#
.Synopsis
Remove all shell session elements added by the Activate script, including the
addition of the virtual environment's Python executable from the beginning of
the PATH variable.
.Parameter NonDestructive
If present, do not remove this function from the global namespace for the
session.
#>
function global:deactivate ([switch]$NonDestructive) {
# Undo everything Activate.ps1 changed: restore the saved prompt function,
# PYTHONHOME and PATH, then drop the venv marker variables.
# Revert to original values
# The prior prompt:
if (Test-Path -Path Function:_OLD_VIRTUAL_PROMPT) {
Copy-Item -Path Function:_OLD_VIRTUAL_PROMPT -Destination Function:prompt
Remove-Item -Path Function:_OLD_VIRTUAL_PROMPT
}
# The prior PYTHONHOME (only saved if it was set at activation time):
if (Test-Path -Path Env:_OLD_VIRTUAL_PYTHONHOME) {
Copy-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME -Destination Env:PYTHONHOME
Remove-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME
}
# The prior PATH:
if (Test-Path -Path Env:_OLD_VIRTUAL_PATH) {
Copy-Item -Path Env:_OLD_VIRTUAL_PATH -Destination Env:PATH
Remove-Item -Path Env:_OLD_VIRTUAL_PATH
}
# Just remove the VIRTUAL_ENV altogether:
if (Test-Path -Path Env:VIRTUAL_ENV) {
Remove-Item -Path env:VIRTUAL_ENV
}
# Just remove VIRTUAL_ENV_PROMPT altogether.
if (Test-Path -Path Env:VIRTUAL_ENV_PROMPT) {
Remove-Item -Path env:VIRTUAL_ENV_PROMPT
}
# Just remove the _PYTHON_VENV_PROMPT_PREFIX altogether:
# (-Force is needed because the variable is created ReadOnly by Activate.ps1)
if (Get-Variable -Name "_PYTHON_VENV_PROMPT_PREFIX" -ErrorAction SilentlyContinue) {
Remove-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Scope Global -Force
}
# Leave deactivate function in the global namespace if requested:
# without -NonDestructive the function removes itself, so a stale
# `deactivate` cannot be called once no venv is active.
if (-not $NonDestructive) {
Remove-Item -Path function:deactivate
}
}
<#
.Description
Get-PyVenvConfig parses the values from the pyvenv.cfg file located in the
given folder, and returns them in a map.
For each line in the pyvenv.cfg file, if that line can be parsed into exactly
two strings separated by `=` (with any amount of whitespace surrounding the =)
then it is considered a `key = value` line. The left hand string is the key,
the right hand is the value.
If the value starts with a `'` or a `"` then the first and last character is
stripped from the value before being captured.
.Parameter ConfigDir
Path to the directory that contains the `pyvenv.cfg` file.
#>
# Parse the venv's pyvenv.cfg into a hashtable of key/value strings.
# Returns an empty map when the file is missing; surrounding quotes on a
# value are stripped.
function Get-PyVenvConfig(
[String]
$ConfigDir
) {
Write-Verbose "Given ConfigDir=$ConfigDir, obtain values in pyvenv.cfg"
# Ensure the file exists, and issue a warning if it doesn't (but still allow the function to continue).
$pyvenvConfigPath = Join-Path -Resolve -Path $ConfigDir -ChildPath 'pyvenv.cfg' -ErrorAction Continue
# An empty map will be returned if no config file is found.
$pyvenvConfig = @{ }
if ($pyvenvConfigPath) {
Write-Verbose "File exists, parse `key = value` lines"
$pyvenvConfigContent = Get-Content -Path $pyvenvConfigPath
$pyvenvConfigContent | ForEach-Object {
# Split on the FIRST '=' only (limit 2), so values containing '='
# (e.g. the `command = ... -m venv ...` line) stay intact.
$keyval = $PSItem -split "\s*=\s*", 2
if ($keyval[0] -and $keyval[1]) {
$val = $keyval[1]
# Remove extraneous quotations around a string value.
# Only the first and last characters are stripped, and only when
# the value starts with a single or double quote.
if ("'""".Contains($val.Substring(0, 1))) {
$val = $val.Substring(1, $val.Length - 2)
}
$pyvenvConfig[$keyval[0]] = $val
Write-Verbose "Adding Key: '$($keyval[0])'='$val'"
}
}
}
return $pyvenvConfig
}
<# Begin Activate script --------------------------------------------------- #>
# Top-level activation sequence: locate the venv, read pyvenv.cfg, pick the
# prompt, deactivate any current venv, then export VIRTUAL_ENV, install the
# prompt function, clear PYTHONHOME and prepend the venv Scripts dir to PATH.
# Determine the containing directory of this script
$VenvExecPath = Split-Path -Parent $MyInvocation.MyCommand.Definition
$VenvExecDir = Get-Item -Path $VenvExecPath
Write-Verbose "Activation script is located in path: '$VenvExecPath'"
# Bug fix: the two messages below were missing the closing quote in the
# emitted verbose text ("...FullName)" instead of "...FullName)'").
Write-Verbose "VenvExecDir Fullname: '$($VenvExecDir.FullName)'"
Write-Verbose "VenvExecDir Name: '$($VenvExecDir.Name)'"
# Set values required in priority: CmdLine, ConfigFile, Default
# First, get the location of the virtual environment, it might not be
# VenvExecDir if specified on the command line.
if ($VenvDir) {
Write-Verbose "VenvDir given as parameter, using '$VenvDir' to determine values"
}
else {
Write-Verbose "VenvDir not given as a parameter, using parent directory name as VenvDir."
$VenvDir = $VenvExecDir.Parent.FullName.TrimEnd("\\/")
Write-Verbose "VenvDir=$VenvDir"
}
# Next, read the `pyvenv.cfg` file to determine any required value such
# as `prompt`.
$pyvenvCfg = Get-PyVenvConfig -ConfigDir $VenvDir
# Next, set the prompt from the command line, or the config file, or
# just use the name of the virtual environment folder.
if ($Prompt) {
Write-Verbose "Prompt specified as argument, using '$Prompt'"
}
else {
Write-Verbose "Prompt not specified as argument to script, checking pyvenv.cfg value"
if ($pyvenvCfg -and $pyvenvCfg['prompt']) {
Write-Verbose " Setting based on value in pyvenv.cfg='$($pyvenvCfg['prompt'])'"
$Prompt = $pyvenvCfg['prompt'];
}
else {
Write-Verbose " Setting prompt based on parent's directory's name. (Is the directory name passed to venv module when creating the virtual environment)"
Write-Verbose " Got leaf-name of $VenvDir='$(Split-Path -Path $venvDir -Leaf)'"
$Prompt = Split-Path -Path $venvDir -Leaf
}
}
Write-Verbose "Prompt = '$Prompt'"
Write-Verbose "VenvDir='$VenvDir'"
# Deactivate any currently active virtual environment, but leave the
# deactivate function in place.
deactivate -nondestructive
# Now set the environment variable VIRTUAL_ENV, used by many tools to determine
# that there is an activated venv.
$env:VIRTUAL_ENV = $VenvDir
if (-not $Env:VIRTUAL_ENV_DISABLE_PROMPT) {
Write-Verbose "Setting prompt to '$Prompt'"
# Set the prompt to include the env name
# Make sure _OLD_VIRTUAL_PROMPT is global
function global:_OLD_VIRTUAL_PROMPT { "" }
Copy-Item -Path function:prompt -Destination function:_OLD_VIRTUAL_PROMPT
New-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Description "Python virtual environment prompt prefix" -Scope Global -Option ReadOnly -Visibility Public -Value $Prompt
function global:prompt {
Write-Host -NoNewline -ForegroundColor Green "($_PYTHON_VENV_PROMPT_PREFIX) "
_OLD_VIRTUAL_PROMPT
}
$env:VIRTUAL_ENV_PROMPT = $Prompt
}
# Clear PYTHONHOME (saved first so deactivate can restore it)
if (Test-Path -Path Env:PYTHONHOME) {
Copy-Item -Path Env:PYTHONHOME -Destination Env:_OLD_VIRTUAL_PYTHONHOME
Remove-Item -Path Env:PYTHONHOME
}
# Add the venv to the PATH (saved first so deactivate can restore it)
Copy-Item -Path Env:PATH -Destination Env:_OLD_VIRTUAL_PATH
$Env:PATH = "$VenvExecDir$([System.IO.Path]::PathSeparator)$Env:PATH"

View File

@@ -0,0 +1,63 @@
# This file must be used with "source bin/activate" *from bash*
# you cannot run it directly
# Restore the pre-activation shell state: PATH, PYTHONHOME and PS1, then
# drop the venv marker variables. With the single argument "nondestructive"
# the function is kept defined so it can run again on re-activation.
deactivate () {
# reset old environment variables
if [ -n "${_OLD_VIRTUAL_PATH:-}" ] ; then
PATH="${_OLD_VIRTUAL_PATH:-}"
export PATH
unset _OLD_VIRTUAL_PATH
fi
if [ -n "${_OLD_VIRTUAL_PYTHONHOME:-}" ] ; then
PYTHONHOME="${_OLD_VIRTUAL_PYTHONHOME:-}"
export PYTHONHOME
unset _OLD_VIRTUAL_PYTHONHOME
fi
# Call hash to forget past commands. Without forgetting
# past commands the $PATH changes we made may not be respected
hash -r 2> /dev/null
if [ -n "${_OLD_VIRTUAL_PS1:-}" ] ; then
PS1="${_OLD_VIRTUAL_PS1:-}"
export PS1
unset _OLD_VIRTUAL_PS1
fi
unset VIRTUAL_ENV
unset VIRTUAL_ENV_PROMPT
if [ ! "${1:-}" = "nondestructive" ] ; then
# Self destruct! Remove the function so a stale `deactivate`
# cannot be called once no venv is active.
unset -f deactivate
fi
}
# unset irrelevant variables
# Clean up any previous venv first, keeping the deactivate function defined.
deactivate nondestructive
# NOTE(review): VIRTUAL_ENV is a Windows-style path with backslashes — this
# generated script appears intended for a Windows POSIX shell (e.g. Git Bash);
# confirm it resolves correctly in the shells actually used.
VIRTUAL_ENV='D:\Code\Project\FT-Platform\request\xrequest'
export VIRTUAL_ENV
# Save the current PATH so deactivate can restore it, then prepend the
# venv's Scripts directory so its python/pip are found first.
_OLD_VIRTUAL_PATH="$PATH"
PATH="$VIRTUAL_ENV/"Scripts":$PATH"
export PATH
# unset PYTHONHOME if set
# this will fail if PYTHONHOME is set to the empty string (which is bad anyway)
# could use `if (set -u; : $PYTHONHOME) ;` in bash
if [ -n "${PYTHONHOME:-}" ] ; then
_OLD_VIRTUAL_PYTHONHOME="${PYTHONHOME:-}"
unset PYTHONHOME
fi
# Prefix the prompt with the venv name unless the user opted out via
# VIRTUAL_ENV_DISABLE_PROMPT; the old PS1 is saved for deactivate.
if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT:-}" ] ; then
_OLD_VIRTUAL_PS1="${PS1:-}"
PS1='(xrequest) '"${PS1:-}"
export PS1
VIRTUAL_ENV_PROMPT='(xrequest) '
export VIRTUAL_ENV_PROMPT
fi
# Call hash to forget past commands. Without forgetting
# past commands the $PATH changes we made may not be respected
hash -r 2> /dev/null

View File

@@ -0,0 +1,34 @@
@echo off
rem Activate the venv for cmd.exe: switch to UTF-8, set VIRTUAL_ENV, prefix
rem the prompt, clear PYTHONHOME and prepend the Scripts dir to PATH. The
rem _OLD_VIRTUAL_* variables are saved so deactivate.bat can restore them.
rem This file is UTF-8 encoded, so we need to update the current code page while executing it
for /f "tokens=2 delims=:." %%a in ('"%SystemRoot%\System32\chcp.com"') do (
set _OLD_CODEPAGE=%%a
)
if defined _OLD_CODEPAGE (
"%SystemRoot%\System32\chcp.com" 65001 > nul
)
set "VIRTUAL_ENV=D:\Code\Project\FT-Platform\request\xrequest"
rem Ensure PROMPT has the cmd default before we decorate it.
if not defined PROMPT set PROMPT=$P$G
rem If a venv was already active, restore its saved prompt/PYTHONHOME first
rem so activations do not stack.
if defined _OLD_VIRTUAL_PROMPT set PROMPT=%_OLD_VIRTUAL_PROMPT%
if defined _OLD_VIRTUAL_PYTHONHOME set PYTHONHOME=%_OLD_VIRTUAL_PYTHONHOME%
set _OLD_VIRTUAL_PROMPT=%PROMPT%
set PROMPT=(xrequest) %PROMPT%
if defined PYTHONHOME set _OLD_VIRTUAL_PYTHONHOME=%PYTHONHOME%
set PYTHONHOME=
rem Restore the original PATH if one was saved, then save and prepend.
if defined _OLD_VIRTUAL_PATH set PATH=%_OLD_VIRTUAL_PATH%
if not defined _OLD_VIRTUAL_PATH set _OLD_VIRTUAL_PATH=%PATH%
set "PATH=%VIRTUAL_ENV%\Scripts;%PATH%"
set "VIRTUAL_ENV_PROMPT=(xrequest) "
:END
rem Restore the code page that was active before this script ran.
if defined _OLD_CODEPAGE (
"%SystemRoot%\System32\chcp.com" %_OLD_CODEPAGE% > nul
)

View File

@@ -0,0 +1,22 @@
@echo off
rem Undo activate.bat: restore the saved PROMPT, PYTHONHOME and PATH, then
rem clear the venv marker variables and the _OLD_VIRTUAL_* save slots.
if defined _OLD_VIRTUAL_PROMPT (
set "PROMPT=%_OLD_VIRTUAL_PROMPT%"
)
set _OLD_VIRTUAL_PROMPT=
if defined _OLD_VIRTUAL_PYTHONHOME (
set "PYTHONHOME=%_OLD_VIRTUAL_PYTHONHOME%"
set _OLD_VIRTUAL_PYTHONHOME=
)
if defined _OLD_VIRTUAL_PATH (
set "PATH=%_OLD_VIRTUAL_PATH%"
)
set _OLD_VIRTUAL_PATH=
set VIRTUAL_ENV=
set VIRTUAL_ENV_PROMPT=
:END

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@@ -0,0 +1,5 @@
home = D:\Softwares\Anaconda\envs\ft
include-system-site-packages = false
version = 3.11.14
executable = D:\Softwares\Anaconda\envs\ft\python.exe
command = D:\Softwares\Anaconda\envs\ft\python.exe -m venv D:\Code\Project\FT-Platform\request\xrequest

File diff suppressed because it is too large Load Diff